Squashed 'third_party/ceres/' content from commit e51e9b4

Change-Id: I763587619d57e594d3fa158dc3a7fe0b89a1743b
git-subtree-dir: third_party/ceres
git-subtree-split: e51e9b46f6ca88ab8b2266d0e362771db6d98067
diff --git a/internal/ceres/CMakeLists.txt b/internal/ceres/CMakeLists.txt
new file mode 100644
index 0000000..5bbe2bd
--- /dev/null
+++ b/internal/ceres/CMakeLists.txt
@@ -0,0 +1,501 @@
+# Ceres Solver - A fast non-linear least squares minimizer
+# Copyright 2015 Google Inc. All rights reserved.
+# http://ceres-solver.org/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+#   this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+# * Neither the name of Google Inc. nor the names of its contributors may be
+#   used to endorse or promote products derived from this software without
+#   specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# Author: keir@google.com (Keir Mierle)
+
+# Avoid 'xxx.cc has no symbols' warnings from source files which are 'empty'
+# when their enclosing #ifdefs are disabled.
+if (CERES_THREADING_MODEL STREQUAL "CXX11_THREADS")
+  set(CERES_PARALLEL_FOR_SRC parallel_for_cxx.cc thread_pool.cc)
+elseif (CERES_THREADING_MODEL STREQUAL "OPENMP")
+  set(CERES_PARALLEL_FOR_SRC parallel_for_openmp.cc)
+  if (CMAKE_COMPILER_IS_GNUCXX)
+    # OpenMP in GCC requires the GNU OpenMP library.
+    list(APPEND CERES_LIBRARY_PRIVATE_DEPENDENCIES gomp)
+  endif()
+elseif (CERES_THREADING_MODEL STREQUAL "NO_THREADS")
+  set(CERES_PARALLEL_FOR_SRC parallel_for_nothreads.cc)
+endif()
+
+set(CERES_INTERNAL_SRC
+    ${CERES_PARALLEL_FOR_SRC}
+    accelerate_sparse.cc
+    array_utils.cc
+    blas.cc
+    block_evaluate_preparer.cc
+    block_jacobi_preconditioner.cc
+    block_jacobian_writer.cc
+    block_random_access_dense_matrix.cc
+    block_random_access_diagonal_matrix.cc
+    block_random_access_matrix.cc
+    block_random_access_sparse_matrix.cc
+    block_sparse_matrix.cc
+    block_structure.cc
+    c_api.cc
+    canonical_views_clustering.cc
+    cgnr_solver.cc
+    callbacks.cc
+    compressed_col_sparse_matrix_utils.cc
+    compressed_row_jacobian_writer.cc
+    compressed_row_sparse_matrix.cc
+    conditioned_cost_function.cc
+    conjugate_gradients_solver.cc
+    context.cc
+    context_impl.cc
+    coordinate_descent_minimizer.cc
+    corrector.cc
+    covariance.cc
+    covariance_impl.cc
+    cxsparse.cc
+    dense_normal_cholesky_solver.cc
+    dense_qr_solver.cc
+    dense_sparse_matrix.cc
+    detect_structure.cc
+    dogleg_strategy.cc
+    dynamic_compressed_row_jacobian_writer.cc
+    dynamic_compressed_row_sparse_matrix.cc
+    dynamic_sparse_normal_cholesky_solver.cc
+    evaluator.cc
+    eigensparse.cc
+    file.cc
+    float_suitesparse.cc
+    float_cxsparse.cc
+    function_sample.cc
+    gradient_checker.cc
+    gradient_checking_cost_function.cc
+    gradient_problem.cc
+    gradient_problem_solver.cc
+    implicit_schur_complement.cc
+    inner_product_computer.cc
+    is_close.cc
+    iterative_refiner.cc
+    iterative_schur_complement_solver.cc
+    levenberg_marquardt_strategy.cc
+    lapack.cc
+    line_search.cc
+    line_search_direction.cc
+    line_search_minimizer.cc
+    line_search_preprocessor.cc
+    linear_least_squares_problems.cc
+    linear_operator.cc
+    linear_solver.cc
+    local_parameterization.cc
+    loss_function.cc
+    low_rank_inverse_hessian.cc
+    minimizer.cc
+    normal_prior.cc
+    parallel_utils.cc
+    parameter_block_ordering.cc
+    partitioned_matrix_view.cc
+    polynomial.cc
+    preconditioner.cc
+    preprocessor.cc
+    problem.cc
+    problem_impl.cc
+    program.cc
+    reorder_program.cc
+    residual_block.cc
+    residual_block_utils.cc
+    schur_complement_solver.cc
+    schur_eliminator.cc
+    schur_jacobi_preconditioner.cc
+    schur_templates.cc
+    scratch_evaluate_preparer.cc
+    single_linkage_clustering.cc
+    solver.cc
+    solver_utils.cc
+    sparse_matrix.cc
+    sparse_cholesky.cc
+    sparse_normal_cholesky_solver.cc
+    subset_preconditioner.cc
+    split.cc
+    stringprintf.cc
+    suitesparse.cc
+    thread_token_provider.cc
+    triplet_sparse_matrix.cc
+    trust_region_preprocessor.cc
+    trust_region_minimizer.cc
+    trust_region_step_evaluator.cc
+    trust_region_strategy.cc
+    types.cc
+    visibility.cc
+    visibility_based_preconditioner.cc
+    wall_time.cc
+)
+
+# Also depend on the header files so that they appear in IDEs.
+file(GLOB CERES_INTERNAL_HDRS *.h)
+if (MINIGLOG)
+  file(GLOB MINIGLOG_HDRS miniglog/glog/*.h)
+  list(APPEND CERES_INTERNAL_HDRS ${MINIGLOG_HDRS})
+  if (ANDROID)
+    list(APPEND CERES_LIBRARY_PUBLIC_DEPENDENCIES log)
+  endif()
+endif()
+
+# Depend also on public headers so they appear in IDEs.
+file(GLOB CERES_PUBLIC_HDRS ${Ceres_SOURCE_DIR}/include/ceres/*.h)
+file(GLOB CERES_PUBLIC_INTERNAL_HDRS ${Ceres_SOURCE_DIR}/include/ceres/internal/*.h)
+
+# Include the specialized schur solvers.
+if (SCHUR_SPECIALIZATIONS)
+  file(GLOB CERES_INTERNAL_SCHUR_FILES generated/*.cc)
+else (SCHUR_SPECIALIZATIONS)
+  # Only the fully dynamic solver. The build is much faster this way.
+  file(GLOB CERES_INTERNAL_SCHUR_FILES generated/*_d_d_d.cc)
+endif (SCHUR_SPECIALIZATIONS)
+
+# Build the list of dependencies for Ceres based on the current configuration.
+find_package(Threads QUIET)
+list(APPEND CERES_LIBRARY_PUBLIC_DEPENDENCIES Threads::Threads)
+
+if (NOT MINIGLOG AND GLOG_FOUND)
+  list(APPEND CERES_LIBRARY_PUBLIC_DEPENDENCIES ${GLOG_LIBRARIES})
+  if (GFLAGS_FOUND)
+    # If glog & gflags are both found, we assume that glog was built with
+    # gflags, as it is awkward to perform a try_compile() to verify this
+    # when gflags is an imported target (as it is in newer versions).
+    # As glog #includes gflags/gflags.h in glog/logging.h if compiled with
+    # gflags, it is thus a public dependency for Ceres in this case.
+    list(APPEND CERES_LIBRARY_PUBLIC_DEPENDENCIES ${GFLAGS_LIBRARIES})
+  endif()
+endif (NOT MINIGLOG AND GLOG_FOUND)
+
+if (SUITESPARSE AND SUITESPARSE_FOUND)
+  # Define version information for use in Solver::FullReport.
+  add_definitions(-DCERES_SUITESPARSE_VERSION="${SUITESPARSE_VERSION}")
+  list(APPEND CERES_LIBRARY_PRIVATE_DEPENDENCIES ${SUITESPARSE_LIBRARIES})
+endif (SUITESPARSE AND SUITESPARSE_FOUND)
+
+if (CXSPARSE AND CXSPARSE_FOUND)
+  # Define version information for use in Solver::FullReport.
+  add_definitions(-DCERES_CXSPARSE_VERSION="${CXSPARSE_VERSION}")
+  list(APPEND CERES_LIBRARY_PRIVATE_DEPENDENCIES ${CXSPARSE_LIBRARIES})
+endif (CXSPARSE AND CXSPARSE_FOUND)
+
+if (ACCELERATESPARSE AND AccelerateSparse_FOUND)
+  list(APPEND CERES_LIBRARY_PRIVATE_DEPENDENCIES ${AccelerateSparse_LIBRARIES})
+endif()
+
+if (LAPACK_FOUND)
+  list(APPEND CERES_LIBRARY_PRIVATE_DEPENDENCIES ${LAPACK_LIBRARIES})
+endif ()
+
+set(CERES_LIBRARY_SOURCE
+    ${CERES_INTERNAL_SRC}
+    ${CERES_INTERNAL_HDRS}
+    ${CERES_PUBLIC_HDRS}
+    ${CERES_PUBLIC_INTERNAL_HDRS}
+    ${CERES_INTERNAL_SCHUR_FILES})
+
+# Primarily for Android, but optionally for others, compile the minimal
+# glog implementation into Ceres.
+if (MINIGLOG)
+  list(APPEND CERES_LIBRARY_SOURCE miniglog/glog/logging.cc)
+endif (MINIGLOG)
+
+add_library(ceres ${CERES_LIBRARY_SOURCE})
+set_target_properties(ceres PROPERTIES
+  VERSION ${CERES_VERSION}
+  SOVERSION ${CERES_VERSION_MAJOR})
+
+# The ability to specify a minimum language version via cxx_std_[11,14,17]
+# requires CMake >= 3.8.  Prior to that we have to specify the compiler features
+# we require.
+if (CMAKE_VERSION VERSION_LESS 3.8)
+  set(REQUIRED_PUBLIC_CXX_FEATURES cxx_alignas cxx_alignof cxx_constexpr)
+else()
+  # Forward whatever C++ version Ceres was compiled with as our requirement
+  # for downstream clients.
+  set(REQUIRED_PUBLIC_CXX_FEATURES cxx_std_${CMAKE_CXX_STANDARD})
+endif()
+target_compile_features(ceres PUBLIC ${REQUIRED_PUBLIC_CXX_FEATURES})
+
+include(AppendTargetProperty)
+# Always build position-independent code (PIC), even when building Ceres as a
+# static library so that shared libraries can link against it, not just
+# executables (PIC does not apply on Windows).
+if (NOT WIN32 AND NOT BUILD_SHARED_LIBS)
+  # Use set_target_properties() not append_target_property() here as
+  # POSITION_INDEPENDENT_CODE is a binary ON/OFF switch.
+  set_target_properties(ceres PROPERTIES POSITION_INDEPENDENT_CODE ON)
+endif()
+
+if (BUILD_SHARED_LIBS)
+  # When building a shared library, mark all external libraries as
+  # PRIVATE so they don't show up as a dependency.
+  target_link_libraries(ceres
+        LINK_PUBLIC ${CERES_LIBRARY_PUBLIC_DEPENDENCIES}
+        LINK_PRIVATE ${CERES_LIBRARY_PRIVATE_DEPENDENCIES})
+else (BUILD_SHARED_LIBS)
+  # When building a static library, all external libraries are
+  # PUBLIC (the default) since the user needs to link to them.
+  # They will be listed in CeresTargets.cmake.
+  set(CERES_LIBRARY_DEPENDENCIES
+        ${CERES_LIBRARY_PUBLIC_DEPENDENCIES}
+        ${CERES_LIBRARY_PRIVATE_DEPENDENCIES})
+  target_link_libraries(ceres ${CERES_LIBRARY_DEPENDENCIES})
+endif (BUILD_SHARED_LIBS)
+
+# Add the Ceres headers to its target.
+#
+# Force the location containing the configured config.h to the front of the
+# include_directories list (by default it is appended to the back) to ensure
+# that if the user has an installed version of Ceres in the same location as one
+# of the dependencies (e.g. /usr/local) that we find the config.h we just
+# configured, not the (older) installed config.h.
+target_include_directories(ceres BEFORE PUBLIC
+  $<BUILD_INTERFACE:${Ceres_BINARY_DIR}/config>)
+target_include_directories(ceres PRIVATE ${Ceres_SOURCE_DIR}/internal)
+target_include_directories(ceres PUBLIC
+  $<BUILD_INTERFACE:${Ceres_SOURCE_DIR}/include>
+  $<INSTALL_INTERFACE:include>)
+
+# Eigen SparseQR generates various compiler warnings related to unused and
+# uninitialised local variables.  To avoid having to individually suppress these
+# warnings around the #include statements for Eigen headers across all GCC/Clang
+# versions, we tell CMake to treat Eigen headers as system headers.  This
+# results in all compiler warnings from them being suppressed.
+target_include_directories(ceres SYSTEM PUBLIC ${EIGEN_INCLUDE_DIRS})
+
+# Gather the list of public & private include locations for all enabled optional
+# dependencies to be added to the Ceres target.
+set(CERES_LIBRARY_PRIVATE_DEPENDENCIES_INCLUDE_DIRS "")
+set(CERES_LIBRARY_PUBLIC_DEPENDENCIES_INCLUDE_DIRS "")
+if (MINIGLOG)
+  # Force the miniglog headers to the front of the public include directories
+  # to protect against the case when the user has glog installed in a standard
+  # location (specifically the same as the Ceres install location) but compiled
+  # Ceres with MINIGLOG anyway.  Otherwise: "glog/logging.h" in the public Ceres
+  # headers used in client code would match the installed version of glog, not
+  # the miniglog headers, and the client application would fail to link.
+  #
+  # Note that this is an imperfect fix, as we cannot control the include
+  # directories in client projects, and they could easily invert this ordering
+  # themselves (intentionally or otherwise) and so break their build.
+  target_include_directories(ceres BEFORE PUBLIC
+    $<BUILD_INTERFACE:${Ceres_SOURCE_DIR}/internal/ceres/miniglog>
+    $<INSTALL_INTERFACE:include/ceres/internal/miniglog>)
+elseif (NOT FOUND_INSTALLED_GLOG_CMAKE_CONFIGURATION)
+  # Only append glog include directories if the glog found was not a CMake
+  # exported target that already includes them.
+  list(APPEND CERES_LIBRARY_PUBLIC_DEPENDENCIES_INCLUDE_DIRS
+    ${GLOG_INCLUDE_DIRS})
+endif()
+if (SUITESPARSE)
+  list(APPEND CERES_LIBRARY_PRIVATE_DEPENDENCIES_INCLUDE_DIRS
+    ${SUITESPARSE_INCLUDE_DIRS})
+endif()
+if (CXSPARSE)
+  list(APPEND CERES_LIBRARY_PRIVATE_DEPENDENCIES_INCLUDE_DIRS
+    ${CXSPARSE_INCLUDE_DIRS})
+endif()
+if (ACCELERATESPARSE)
+  list(APPEND CERES_LIBRARY_PRIVATE_DEPENDENCIES_INCLUDE_DIRS
+    ${AccelerateSparse_INCLUDE_DIRS})
+endif()
+if (GFLAGS AND NOT FOUND_INSTALLED_GFLAGS_CMAKE_CONFIGURATION)
+  # Only append gflags include directories if the gflags found was not a CMake
+  # exported target that already includes them.
+  list(APPEND CERES_LIBRARY_PUBLIC_DEPENDENCIES_INCLUDE_DIRS
+    ${GFLAGS_INCLUDE_DIRS})
+endif()
+# Add include locations for optional dependencies to the Ceres target without
+# duplication.
+list(REMOVE_DUPLICATES CERES_LIBRARY_PRIVATE_DEPENDENCIES_INCLUDE_DIRS)
+foreach(INC_DIR ${CERES_LIBRARY_PRIVATE_DEPENDENCIES_INCLUDE_DIRS})
+  target_include_directories(ceres PRIVATE ${INC_DIR})
+endforeach()
+list(REMOVE_DUPLICATES CERES_LIBRARY_PUBLIC_DEPENDENCIES_INCLUDE_DIRS)
+foreach(INC_DIR ${CERES_LIBRARY_PUBLIC_DEPENDENCIES_INCLUDE_DIRS})
+  target_include_directories(ceres PUBLIC ${INC_DIR})
+endforeach()
+
+install(TARGETS ceres
+        EXPORT  CeresExport
+        RUNTIME DESTINATION bin
+        LIBRARY DESTINATION lib${LIB_SUFFIX}
+        ARCHIVE DESTINATION lib${LIB_SUFFIX})
+
+if (BUILD_TESTING AND GFLAGS)
+  add_library(gtest gmock_gtest_all.cc gmock_main.cc)
+  target_include_directories(gtest PUBLIC ${Ceres_SOURCE_DIR}/internal/ceres)
+  if (BUILD_SHARED_LIBS)
+    # Define gtest-specific shared library flags for compilation.
+    append_target_property(gtest COMPILE_DEFINITIONS
+      GTEST_CREATE_SHARED_LIBRARY)
+  endif()
+
+  add_library(test_util
+              evaluator_test_utils.cc
+              numeric_diff_test_utils.cc
+              test_util.cc)
+  target_include_directories(test_util PUBLIC ${Ceres_SOURCE_DIR}/internal)
+
+  if (MINIGLOG)
+    # When using miniglog, it is compiled into Ceres, thus Ceres becomes
+    # the library against which other libraries should link for logging.
+    target_link_libraries(gtest ${GFLAGS_LIBRARIES} ceres)
+    target_link_libraries(test_util ceres gtest)
+  else (MINIGLOG)
+    target_link_libraries(gtest ${GFLAGS_LIBRARIES} ${GLOG_LIBRARIES})
+    target_link_libraries(test_util ceres gtest ${GLOG_LIBRARIES})
+  endif (MINIGLOG)
+
+  macro (CERES_TEST NAME)
+    add_executable(${NAME}_test ${NAME}_test.cc)
+    # Pull in local headers from the generated test directories when
+    # ceres_test() is invoked there, the private headers in this directory
+    # (which may be referenced without the 'ceres' path prefix), and the
+    # include directories of any private dependencies that may be referenced
+    # directly.
+    target_include_directories(${NAME}_test
+      PUBLIC ${CMAKE_CURRENT_LIST_DIR}
+             ${Ceres_SOURCE_DIR}/internal/ceres
+             ${CERES_LIBRARY_PRIVATE_DEPENDENCIES_INCLUDE_DIRS})
+
+    target_link_libraries(${NAME}_test test_util ceres gtest)
+    if (BUILD_SHARED_LIBS)
+      # Define gtest-specific shared library flags for linking.
+      append_target_property(${NAME}_test COMPILE_DEFINITIONS
+        GTEST_LINKED_AS_SHARED_LIBRARY)
+    endif()
+    add_test(NAME ${NAME}_test
+             COMMAND ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${NAME}_test
+             --test_srcdir
+             ${Ceres_SOURCE_DIR}/data)
+  endmacro (CERES_TEST)
+
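+  # For example, ceres_test(array_utils) below builds array_utils_test from
+  # array_utils_test.cc, links it against test_util, ceres and gtest, and
+  # registers it with CTest (passing --test_srcdir so the test can locate the
+  # data directory).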
+  ceres_test(array_utils)
+  ceres_test(autodiff)
+  ceres_test(autodiff_cost_function)
+  ceres_test(autodiff_local_parameterization)
+  ceres_test(block_jacobi_preconditioner)
+  ceres_test(block_random_access_dense_matrix)
+  ceres_test(block_random_access_diagonal_matrix)
+  ceres_test(block_random_access_sparse_matrix)
+  ceres_test(block_sparse_matrix)
+  ceres_test(c_api)
+  ceres_test(canonical_views_clustering)
+  ceres_test(compressed_col_sparse_matrix_utils)
+  ceres_test(compressed_row_sparse_matrix)
+  ceres_test(concurrent_queue)
+  ceres_test(conditioned_cost_function)
+  ceres_test(conjugate_gradients_solver)
+  ceres_test(corrector)
+  ceres_test(cost_function_to_functor)
+  ceres_test(covariance)
+  ceres_test(cubic_interpolation)
+  ceres_test(dense_linear_solver)
+  ceres_test(dense_sparse_matrix)
+  ceres_test(detect_structure)
+  ceres_test(dogleg_strategy)
+  ceres_test(dynamic_autodiff_cost_function)
+  ceres_test(dynamic_compressed_row_sparse_matrix)
+  ceres_test(dynamic_numeric_diff_cost_function)
+  ceres_test(dynamic_sparse_normal_cholesky_solver)
+  ceres_test(dynamic_sparsity)
+  ceres_test(evaluation_callback)
+  ceres_test(evaluator)
+  ceres_test(gradient_checker)
+  ceres_test(gradient_checking_cost_function)
+  ceres_test(gradient_problem)
+  ceres_test(gradient_problem_solver)
+  ceres_test(graph)
+  ceres_test(graph_algorithms)
+  ceres_test(householder_vector)
+  ceres_test(implicit_schur_complement)
+  ceres_test(inner_product_computer)
+  ceres_test(invert_psd_matrix)
+  ceres_test(integer_sequence)
+  ceres_test(integer_sequence_algorithm)
+  ceres_test(is_close)
+  ceres_test(iterative_refiner)
+  ceres_test(iterative_schur_complement_solver)
+  ceres_test(jet)
+  ceres_test(levenberg_marquardt_strategy)
+  ceres_test(line_search_minimizer)
+  ceres_test(line_search_preprocessor)
+  ceres_test(local_parameterization)
+  ceres_test(loss_function)
+  ceres_test(minimizer)
+  ceres_test(normal_prior)
+  ceres_test(numeric_diff_cost_function)
+  ceres_test(ordered_groups)
+  ceres_test(parallel_for)
+  ceres_test(parallel_utils)
+  ceres_test(parameter_block)
+  ceres_test(parameter_block_ordering)
+  ceres_test(parameter_dims)
+  ceres_test(partitioned_matrix_view)
+  ceres_test(polynomial)
+  ceres_test(problem)
+  ceres_test(program)
+  ceres_test(reorder_program)
+  ceres_test(residual_block)
+  ceres_test(residual_block_utils)
+  ceres_test(rotation)
+  ceres_test(schur_complement_solver)
+  ceres_test(schur_eliminator)
+  ceres_test(single_linkage_clustering)
+  ceres_test(small_blas)
+  ceres_test(solver)
+  ceres_test(sparse_cholesky)
+  ceres_test(sparse_normal_cholesky_solver)
+  ceres_test(subset_preconditioner)
+  ceres_test(system)
+  ceres_test(tiny_solver)
+  ceres_test(tiny_solver_autodiff_function)
+  ceres_test(tiny_solver_cost_function_adapter)
+  ceres_test(thread_pool)
+  ceres_test(triplet_sparse_matrix)
+  ceres_test(trust_region_minimizer)
+  ceres_test(trust_region_preprocessor)
+  ceres_test(visibility)
+  ceres_test(visibility_based_preconditioner)
+
+  add_subdirectory(generated_bundle_adjustment_tests)
+
+endif (BUILD_TESTING AND GFLAGS)
+
+macro(add_dependencies_to_benchmark BENCHMARK_TARGET)
+  target_link_libraries(${BENCHMARK_TARGET} ceres benchmark::benchmark)
+  target_include_directories(${BENCHMARK_TARGET} PUBLIC
+                             ${Ceres_SOURCE_DIR}/internal
+                             ${CERES_LIBRARY_PRIVATE_DEPENDENCIES_INCLUDE_DIRS})
+endmacro()
+
+if (BUILD_BENCHMARKS)
+  add_executable(autodiff_cost_function_benchmark autodiff_cost_function_benchmark.cc)
+  add_dependencies_to_benchmark(autodiff_cost_function_benchmark)
+
+  add_executable(small_blas_gemv_benchmark small_blas_gemv_benchmark.cc)
+  add_dependencies_to_benchmark(small_blas_gemv_benchmark)
+
+  add_executable(small_blas_gemm_benchmark small_blas_gemm_benchmark.cc)
+  add_dependencies_to_benchmark(small_blas_gemm_benchmark)
+endif (BUILD_BENCHMARKS)
diff --git a/internal/ceres/accelerate_sparse.cc b/internal/ceres/accelerate_sparse.cc
new file mode 100644
index 0000000..dc02986
--- /dev/null
+++ b/internal/ceres/accelerate_sparse.cc
@@ -0,0 +1,249 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: alexs.mac@gmail.com (Alex Stewart)
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_NO_ACCELERATE_SPARSE
+
+#include "ceres/accelerate_sparse.h"
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+#include "ceres/compressed_col_sparse_matrix_utils.h"
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "glog/logging.h"
+
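+// Maps a status enum value to its name, e.g. CASESTR(SparseStatusOK) expands
+// to: case SparseStatusOK: return "SparseStatusOK"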
+#define CASESTR(x) case x: return #x
+
+namespace ceres {
+namespace internal {
+
+const char* SparseStatusToString(SparseStatus_t status) {
+  switch (status) {
+    CASESTR(SparseStatusOK);
+    CASESTR(SparseFactorizationFailed);
+    CASESTR(SparseMatrixIsSingular);
+    CASESTR(SparseInternalError);
+    CASESTR(SparseParameterError);
+    CASESTR(SparseStatusReleased);
+    default:
+      return "UKNOWN";
+  }
+}
+
+template<typename Scalar>
+void AccelerateSparse<Scalar>::Solve(NumericFactorization* numeric_factor,
+                                     DenseVector* rhs_and_solution) {
+  SparseSolve(*numeric_factor, *rhs_and_solution);
+}
+
+template<typename Scalar>
+typename AccelerateSparse<Scalar>::ASSparseMatrix
+AccelerateSparse<Scalar>::CreateSparseMatrixTransposeView(
+    CompressedRowSparseMatrix* A) {
+  // Accelerate uses CSC as its sparse storage format whereas Ceres uses CSR.
+  // As this method returns the transpose view we can flip rows/cols to map
+  // from CSR to CSC^T.
+  //
+  // Accelerate's columnStarts is a long*, not an int*.  These types might be
+  // different (e.g. ARM on iOS) so always make a copy.
+  column_starts_.resize(A->num_rows() + 1);  // +1 for final column length.
+  std::copy_n(A->rows(), column_starts_.size(), &column_starts_[0]);
+
+  ASSparseMatrix At;
+  At.structure.rowCount = A->num_cols();
+  At.structure.columnCount = A->num_rows();
+  At.structure.columnStarts = &column_starts_[0];
+  At.structure.rowIndices = A->mutable_cols();
+  At.structure.attributes.transpose = false;
+  At.structure.attributes.triangle = SparseUpperTriangle;
+  At.structure.attributes.kind = SparseSymmetric;
+  At.structure.attributes._reserved = 0;
+  At.structure.attributes._allocatedBySparse = 0;
+  At.structure.blockSize = 1;
+  if (std::is_same<Scalar, double>::value) {
+    At.data = reinterpret_cast<Scalar*>(A->mutable_values());
+  } else {
+    values_ =
+        ConstVectorRef(A->values(), A->num_nonzeros()).template cast<Scalar>();
+    At.data = values_.data();
+  }
+  return At;
+}
+
+template<typename Scalar>
+typename AccelerateSparse<Scalar>::SymbolicFactorization
+AccelerateSparse<Scalar>::AnalyzeCholesky(ASSparseMatrix* A) {
+  return SparseFactor(SparseFactorizationCholesky, A->structure);
+}
+
+template<typename Scalar>
+typename AccelerateSparse<Scalar>::NumericFactorization
+AccelerateSparse<Scalar>::Cholesky(ASSparseMatrix* A,
+                                   SymbolicFactorization* symbolic_factor) {
+  return SparseFactor(*symbolic_factor, *A);
+}
+
+template<typename Scalar>
+void AccelerateSparse<Scalar>::Cholesky(ASSparseMatrix* A,
+                                        NumericFactorization* numeric_factor) {
+  return SparseRefactor(*A, numeric_factor);
+}
+
+// Instantiate only for the specific template types required/supported such
+// that the definition can be in the .cc file.
+template class AccelerateSparse<double>;
+template class AccelerateSparse<float>;
+
+template<typename Scalar>
+std::unique_ptr<SparseCholesky>
+AppleAccelerateCholesky<Scalar>::Create(OrderingType ordering_type) {
+  return std::unique_ptr<SparseCholesky>(
+      new AppleAccelerateCholesky<Scalar>(ordering_type));
+}
+
+template<typename Scalar>
+AppleAccelerateCholesky<Scalar>::AppleAccelerateCholesky(
+    const OrderingType ordering_type)
+    : ordering_type_(ordering_type) {}
+
+template<typename Scalar>
+AppleAccelerateCholesky<Scalar>::~AppleAccelerateCholesky() {
+  FreeSymbolicFactorization();
+  FreeNumericFactorization();
+}
+
+template<typename Scalar>
+CompressedRowSparseMatrix::StorageType
+AppleAccelerateCholesky<Scalar>::StorageType() const {
+  return CompressedRowSparseMatrix::LOWER_TRIANGULAR;
+}
+
+template<typename Scalar>
+LinearSolverTerminationType
+AppleAccelerateCholesky<Scalar>::Factorize(CompressedRowSparseMatrix* lhs,
+                                           std::string* message) {
+  if (lhs == NULL) {
+    *message = "Failure: Input lhs is NULL.";
+    return LINEAR_SOLVER_FATAL_ERROR;
+  }
+  CHECK_EQ(lhs->storage_type(), StorageType());
+  typename SparseTypesTrait<Scalar>::SparseMatrix as_lhs =
+      as_.CreateSparseMatrixTransposeView(lhs);
+
+  if (!symbolic_factor_) {
+    symbolic_factor_.reset(
+        new typename SparseTypesTrait<Scalar>::SymbolicFactorization(
+            as_.AnalyzeCholesky(&as_lhs)));
+    if (symbolic_factor_->status != SparseStatusOK) {
+      *message = StringPrintf(
+          "Apple Accelerate Failure : Symbolic factorisation failed: %s",
+          SparseStatusToString(symbolic_factor_->status));
+      FreeSymbolicFactorization();
+      return LINEAR_SOLVER_FATAL_ERROR;
+    }
+  }
+
+  if (!numeric_factor_) {
+    numeric_factor_.reset(
+        new typename SparseTypesTrait<Scalar>::NumericFactorization(
+            as_.Cholesky(&as_lhs, symbolic_factor_.get())));
+  } else {
+    // Recycle memory from previous numeric factorization.
+    as_.Cholesky(&as_lhs, numeric_factor_.get());
+  }
+  if (numeric_factor_->status != SparseStatusOK) {
+    *message = StringPrintf(
+        "Apple Accelerate Failure : Numeric factorisation failed: %s",
+        SparseStatusToString(numeric_factor_->status));
+    FreeNumericFactorization();
+    return LINEAR_SOLVER_FAILURE;
+  }
+
+  return LINEAR_SOLVER_SUCCESS;
+}
+
+template<typename Scalar>
+LinearSolverTerminationType
+AppleAccelerateCholesky<Scalar>::Solve(const double* rhs,
+                                       double* solution,
+                                       std::string* message) {
+  CHECK_EQ(numeric_factor_->status, SparseStatusOK)
+      << "Solve called without a call to Factorize first ("
+      << SparseStatusToString(numeric_factor_->status) << ").";
+  const int num_cols = numeric_factor_->symbolicFactorization.columnCount;
+
+  typename SparseTypesTrait<Scalar>::DenseVector as_rhs_and_solution;
+  as_rhs_and_solution.count = num_cols;
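+  // The solve overwrites rhs_and_solution in place, so for the double
+  // specialization the rhs is copied into 'solution' and that buffer is
+  // solved directly; for float, a cast copy is solved and converted back to
+  // double below.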
+  if (std::is_same<Scalar, double>::value) {
+    as_rhs_and_solution.data = reinterpret_cast<Scalar*>(solution);
+    std::copy_n(rhs, num_cols, solution);
+  } else {
+    scalar_rhs_and_solution_ =
+        ConstVectorRef(rhs, num_cols).template cast<Scalar>();
+    as_rhs_and_solution.data = scalar_rhs_and_solution_.data();
+  }
+  as_.Solve(numeric_factor_.get(), &as_rhs_and_solution);
+  if (!std::is_same<Scalar, double>::value) {
+    VectorRef(solution, num_cols) =
+        scalar_rhs_and_solution_.template cast<double>();
+  }
+  return LINEAR_SOLVER_SUCCESS;
+}
+
+template<typename Scalar>
+void AppleAccelerateCholesky<Scalar>::FreeSymbolicFactorization() {
+  if (symbolic_factor_) {
+    SparseCleanup(*symbolic_factor_);
+    symbolic_factor_.reset();
+  }
+}
+
+template<typename Scalar>
+void AppleAccelerateCholesky<Scalar>::FreeNumericFactorization() {
+  if (numeric_factor_) {
+    SparseCleanup(*numeric_factor_);
+    numeric_factor_.reset();
+  }
+}
+
+// Instantiate only for the specific template types required/supported such
+// that the definition can be in the .cc file.
+template class AppleAccelerateCholesky<double>;
+template class AppleAccelerateCholesky<float>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_ACCELERATE_SPARSE
diff --git a/internal/ceres/accelerate_sparse.h b/internal/ceres/accelerate_sparse.h
new file mode 100644
index 0000000..b849a80
--- /dev/null
+++ b/internal/ceres/accelerate_sparse.h
@@ -0,0 +1,145 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: alexs.mac@gmail.com (Alex Stewart)
+
+#ifndef CERES_INTERNAL_ACCELERATE_SPARSE_H_
+#define CERES_INTERNAL_ACCELERATE_SPARSE_H_
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_NO_ACCELERATE_SPARSE
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "ceres/linear_solver.h"
+#include "ceres/sparse_cholesky.h"
+#include "Accelerate.h"
+
+namespace ceres {
+namespace internal {
+
+class CompressedRowSparseMatrix;
+class TripletSparseMatrix;
+
+template<typename Scalar>
+struct SparseTypesTrait {
+};
+
+template<>
+struct SparseTypesTrait<double> {
+  typedef DenseVector_Double DenseVector;
+  typedef SparseMatrix_Double SparseMatrix;
+  typedef SparseOpaqueSymbolicFactorization SymbolicFactorization;
+  typedef SparseOpaqueFactorization_Double NumericFactorization;
+};
+
+template<>
+struct SparseTypesTrait<float> {
+  typedef DenseVector_Float DenseVector;
+  typedef SparseMatrix_Float SparseMatrix;
+  typedef SparseOpaqueSymbolicFactorization SymbolicFactorization;
+  typedef SparseOpaqueFactorization_Float NumericFactorization;
+};
+
+template<typename Scalar>
+class AccelerateSparse {
+ public:
+  using DenseVector = typename SparseTypesTrait<Scalar>::DenseVector;
+  // Use ASSparseMatrix to avoid collision with ceres::internal::SparseMatrix.
+  using ASSparseMatrix = typename SparseTypesTrait<Scalar>::SparseMatrix;
+  using SymbolicFactorization = typename SparseTypesTrait<Scalar>::SymbolicFactorization;
+  using NumericFactorization = typename SparseTypesTrait<Scalar>::NumericFactorization;
+
+  // Solves a linear system given its symbolic (reference counted within
+  // NumericFactorization) and numeric factorization.
+  void Solve(NumericFactorization* numeric_factor,
+             DenseVector* rhs_and_solution);
+
+  // Note: Accelerate's API passes/returns its objects by value, but as the
+  //       objects contain pointers to the underlying data these copies are
+  //       all shallow (in some cases Accelerate also reference counts the
+  //       objects internally).
+  ASSparseMatrix CreateSparseMatrixTransposeView(CompressedRowSparseMatrix* A);
+  // Computes a symbolic factorisation of A that can then be used by
+  // Cholesky() to compute a numeric factorisation.
+  SymbolicFactorization AnalyzeCholesky(ASSparseMatrix* A);
+  // Compute the numeric Cholesky factorization of A, given its
+  // symbolic factorization.
+  NumericFactorization Cholesky(ASSparseMatrix* A,
+                                SymbolicFactorization* symbolic_factor);
+  // Reuse the NumericFactorization from a previous matrix with the same
+  // symbolic factorization to represent a new numeric factorization.
+  void Cholesky(ASSparseMatrix* A, NumericFactorization* numeric_factor);
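+  //
+  // A sketch of the intended call order (mirroring the usage in
+  // AppleAccelerateCholesky below); rhs_and_solution is overwritten in place:
+  //
+  //   ASSparseMatrix At = CreateSparseMatrixTransposeView(A);
+  //   SymbolicFactorization symbolic = AnalyzeCholesky(&At);
+  //   NumericFactorization numeric = Cholesky(&At, &symbolic);
+  //   Solve(&numeric, &rhs_and_solution);
+  //   // Later, with a matrix having the same sparsity pattern:
+  //   Cholesky(&At_updated, &numeric);  // Reuses the existing factor memory.
+  //   Solve(&numeric, &rhs_and_solution);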
+
+ private:
+  std::vector<long> column_starts_;
+  // Storage for the values of A if Scalar != double (necessitating a copy).
+  Eigen::Matrix<Scalar, Eigen::Dynamic, 1> values_;
+};
+
+// An implementation of SparseCholesky interface using Apple's Accelerate
+// framework.
+template<typename Scalar>
+class AppleAccelerateCholesky : public SparseCholesky {
+ public:
+  // Factory
+  static std::unique_ptr<SparseCholesky> Create(OrderingType ordering_type);
+
+  // SparseCholesky interface.
+  virtual ~AppleAccelerateCholesky();
+  virtual CompressedRowSparseMatrix::StorageType StorageType() const;
+  virtual LinearSolverTerminationType Factorize(CompressedRowSparseMatrix* lhs,
+                                                std::string* message);
+  virtual LinearSolverTerminationType Solve(const double* rhs,
+                                            double* solution,
+                                            std::string* message);
+
+ private:
+  AppleAccelerateCholesky(const OrderingType ordering_type);
+  void FreeSymbolicFactorization();
+  void FreeNumericFactorization();
+
+  const OrderingType ordering_type_;
+  AccelerateSparse<Scalar> as_;
+  std::unique_ptr<typename AccelerateSparse<Scalar>::SymbolicFactorization>
+  symbolic_factor_;
+  std::unique_ptr<typename AccelerateSparse<Scalar>::NumericFactorization>
+  numeric_factor_;
+  // Copy of rhs/solution if Scalar != double (necessitating a copy).
+  Eigen::Matrix<Scalar, Eigen::Dynamic, 1> scalar_rhs_and_solution_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_ACCELERATE_SPARSE
+
+#endif  // CERES_INTERNAL_ACCELERATE_SPARSE_H_
diff --git a/internal/ceres/array_utils.cc b/internal/ceres/array_utils.cc
new file mode 100644
index 0000000..32459e6
--- /dev/null
+++ b/internal/ceres/array_utils.cc
@@ -0,0 +1,107 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/array_utils.h"
+
+#include <algorithm>
+#include <cmath>
+#include <cstddef>
+#include <string>
+#include <vector>
+#include "ceres/stringprintf.h"
+#include "ceres/types.h"
+namespace ceres {
+namespace internal {
+
+using std::string;
+
+bool IsArrayValid(const int size, const double* x) {
+  if (x != NULL) {
+    for (int i = 0; i < size; ++i) {
+      if (!std::isfinite(x[i]) || (x[i] == kImpossibleValue))  {
+        return false;
+      }
+    }
+  }
+  return true;
+}
+
+int FindInvalidValue(const int size, const double* x) {
+  if (x == NULL) {
+    return size;
+  }
+
+  for (int i = 0; i < size; ++i) {
+    if (!std::isfinite(x[i]) || (x[i] == kImpossibleValue))  {
+      return i;
+    }
+  }
+
+  return size;
+}
+
+void InvalidateArray(const int size, double* x) {
+  if (x != NULL) {
+    for (int i = 0; i < size; ++i) {
+      x[i] = kImpossibleValue;
+    }
+  }
+}
+
+void AppendArrayToString(const int size, const double* x, string* result) {
+  for (int i = 0; i < size; ++i) {
+    if (x == NULL) {
+      StringAppendF(result, "Not Computed  ");
+    } else {
+      if (x[i] == kImpossibleValue) {
+        StringAppendF(result, "Uninitialized ");
+      } else {
+        StringAppendF(result, "%12g ", x[i]);
+      }
+    }
+  }
+}
+
+void MapValuesToContiguousRange(const int size, int* array) {
+  std::vector<int> unique_values(array, array + size);
+  std::sort(unique_values.begin(), unique_values.end());
+  unique_values.erase(std::unique(unique_values.begin(),
+                                  unique_values.end()),
+                      unique_values.end());
+
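+  // Each entry is replaced by its position in the sorted, uniqued list of
+  // values, i.e. its rank in [0, k-1].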
+  for (int i = 0; i < size; ++i) {
+    array[i] = std::lower_bound(unique_values.begin(),
+                                unique_values.end(),
+                                array[i]) - unique_values.begin();
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/array_utils.h b/internal/ceres/array_utils.h
new file mode 100644
index 0000000..1d55733
--- /dev/null
+++ b/internal/ceres/array_utils.h
@@ -0,0 +1,87 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Utility routines for validating arrays.
+//
+// These are useful for detecting two common classes of errors.
+//
+// 1. Uninitialized memory - where the user for some reason did not
+// compute part of an array, but the code expects it.
+//
+// 2. Numerical failure while computing the cost/residual/jacobian,
+// e.g. NaN, infinities etc. This is particularly useful since the
+// automatic differentiation code does computations that are not
+// evident to the user and can silently generate hard to debug errors.
+
+#ifndef CERES_INTERNAL_ARRAY_UTILS_H_
+#define CERES_INTERNAL_ARRAY_UTILS_H_
+
+#include <string>
+#include "ceres/internal/port.h"
+
+namespace ceres {
+namespace internal {
+
+// Fill the array x with an impossible value that the user code is
+// never expected to compute.
+void InvalidateArray(int size, double* x);
+
+// Check if all the entries of the array x are valid, i.e. all the
+// values in the array should be finite and none of them should be
+// equal to the "impossible" value used by InvalidateArray.
+bool IsArrayValid(int size, const double* x);
+
+// If the array contains an invalid value, return the index of the first
+// such value; otherwise (including when x is NULL) return size.
+int FindInvalidValue(const int size, const double* x);
+
+// Utility routine to print an array of doubles to a string. If the
+// array pointer is NULL, "Not Computed" is appended for each entry.
+void AppendArrayToString(const int size, const double* x, std::string* result);
+
+// This routine takes an array of integer values, sorts and uniques
+// them and then maps each value in the array to its position in the
+// sorted+uniqued array. By doing this, if there are k unique
+// values in the array, each value is replaced by an integer in the
+// range [0, k-1], while preserving their relative order.
+//
+// For example
+//
+// [1 0 3 5 0 1 5]
+//
+// gets mapped to
+//
+// [1 0 2 3 0 1 3]
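+//
+// A minimal usage sketch (the values are illustrative):
+//
+//   std::vector<int> blocks = {7, 3, 3, 9};
+//   MapValuesToContiguousRange(blocks.size(), blocks.data());
+//   // blocks is now {1, 0, 0, 2}.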
+void MapValuesToContiguousRange(int size, int* array);
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_ARRAY_UTILS_H_
diff --git a/internal/ceres/array_utils_test.cc b/internal/ceres/array_utils_test.cc
new file mode 100644
index 0000000..77379d9
--- /dev/null
+++ b/internal/ceres/array_utils_test.cc
@@ -0,0 +1,124 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/array_utils.h"
+
+#include <cmath>
+#include <limits>
+#include <vector>
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+using std::vector;
+
+TEST(ArrayUtils, IsArrayValid) {
+  double x[3];
+  x[0] = 0.0;
+  x[1] = 1.0;
+  x[2] = 2.0;
+  EXPECT_TRUE(IsArrayValid(3, x));
+  x[1] = std::numeric_limits<double>::infinity();
+  EXPECT_FALSE(IsArrayValid(3, x));
+  x[1] = std::numeric_limits<double>::quiet_NaN();
+  EXPECT_FALSE(IsArrayValid(3, x));
+  x[1] = std::numeric_limits<double>::signaling_NaN();
+  EXPECT_FALSE(IsArrayValid(3, x));
+  EXPECT_TRUE(IsArrayValid(1, NULL));
+  InvalidateArray(3, x);
+  EXPECT_FALSE(IsArrayValid(3, x));
+}
+
+TEST(ArrayUtils, FindInvalidIndex) {
+  double x[3];
+  x[0] = 0.0;
+  x[1] = 1.0;
+  x[2] = 2.0;
+  EXPECT_EQ(FindInvalidValue(3, x), 3);
+  x[1] = std::numeric_limits<double>::infinity();
+  EXPECT_EQ(FindInvalidValue(3, x), 1);
+  x[1] = std::numeric_limits<double>::quiet_NaN();
+  EXPECT_EQ(FindInvalidValue(3, x), 1);
+  x[1] = std::numeric_limits<double>::signaling_NaN();
+  EXPECT_EQ(FindInvalidValue(3, x), 1);
+  EXPECT_EQ(FindInvalidValue(1, NULL), 1);
+  InvalidateArray(3, x);
+  EXPECT_EQ(FindInvalidValue(3, x), 0);
+}
+
+TEST(MapValuesToContiguousRange, ContiguousEntries) {
+  vector<int> array;
+  array.push_back(0);
+  array.push_back(1);
+  vector<int> expected = array;
+  MapValuesToContiguousRange(array.size(), &array[0]);
+  EXPECT_EQ(array, expected);
+  array.clear();
+
+  array.push_back(1);
+  array.push_back(0);
+  expected = array;
+  MapValuesToContiguousRange(array.size(), &array[0]);
+  EXPECT_EQ(array, expected);
+}
+
+TEST(MapValuesToContiguousRange, NonContiguousEntries) {
+  vector<int> array;
+  array.push_back(0);
+  array.push_back(2);
+  vector<int> expected;
+  expected.push_back(0);
+  expected.push_back(1);
+  MapValuesToContiguousRange(array.size(), &array[0]);
+  EXPECT_EQ(array, expected);
+}
+
+TEST(MapValuesToContiguousRange, NonContiguousRepeatingEntries) {
+  vector<int> array;
+  array.push_back(3);
+  array.push_back(1);
+  array.push_back(0);
+  array.push_back(0);
+  array.push_back(0);
+  array.push_back(5);
+  vector<int> expected;
+  expected.push_back(2);
+  expected.push_back(1);
+  expected.push_back(0);
+  expected.push_back(0);
+  expected.push_back(0);
+  expected.push_back(3);
+  MapValuesToContiguousRange(array.size(), &array[0]);
+  EXPECT_EQ(array, expected);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/autodiff_cost_function_benchmark.cc b/internal/ceres/autodiff_cost_function_benchmark.cc
new file mode 100644
index 0000000..b9c106e
--- /dev/null
+++ b/internal/ceres/autodiff_cost_function_benchmark.cc
@@ -0,0 +1,116 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <memory>
+
+#include "benchmark/benchmark.h"
+#include "ceres/ceres.h"
+#include "ceres/jet.h"
+
+namespace ceres {
+
+// From the NIST problem collection.
+struct Rat43CostFunctor {
+  Rat43CostFunctor(const double x, const double y) : x_(x), y_(y) {}
+
+  template <typename T>
+  bool operator()(const T* parameters, T* residuals) const {
+    const T& b1 = parameters[0];
+    const T& b2 = parameters[1];
+    const T& b3 = parameters[2];
+    const T& b4 = parameters[3];
+    residuals[0] = b1 * pow(1.0 + exp(b2 - b3 * x_), -1.0 / b4) - y_;
+    return true;
+  }
+
+ private:
+  const double x_;
+  const double y_;
+};
+
+// Simple implementation of autodiff using Jets directly instead of
+// going through the machinery of AutoDiffCostFunction, which does
+// the same thing, but much more generically.
+class Rat43Automatic : public ceres::SizedCostFunction<1, 4> {
+ public:
+  Rat43Automatic(const Rat43CostFunctor* functor) : functor_(functor) {}
+  virtual ~Rat43Automatic() {}
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** jacobians) const {
+    if (!jacobians) {
+      return (*functor_)(parameters[0], residuals);
+    }
+
+    typedef ceres::Jet<double, 4> JetT;
+    JetT jets[4];
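+    // Seed the Jets: the scalar part holds the parameter value and the i-th
+    // derivative component is set to 1, so that after evaluation result.v[i]
+    // holds the partial derivative of the residual w.r.t. parameter i.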
+    for (int i = 0; i < 4; ++i) {
+      jets[i].a = parameters[0][i];
+      jets[i].v.setZero();
+      jets[i].v[i] = 1.0;
+    }
+
+    JetT result;
+    (*functor_)(jets, &result);
+
+    residuals[0] = result.a;
+    for (int i = 0; i < 4; ++i) {
+      jacobians[0][i] = result.v[i];
+    }
+    return true;
+  }
+
+ private:
+  std::unique_ptr<const Rat43CostFunctor> functor_;
+};
+
+static void BM_Rat43AutoDiff(benchmark::State& state) {
+  double parameter_block1[] = {1., 2., 3., 4.};
+  double* parameters[] = {parameter_block1};
+
+  double jacobian1[] = {0.0, 0.0, 0.0, 0.0};
+  double residuals;
+  double* jacobians[] = {jacobian1};
+  const double x = 0.2;
+  const double y = 0.3;
+  std::unique_ptr<ceres::CostFunction> cost_function(
+      new ceres::AutoDiffCostFunction<Rat43CostFunctor, 1, 4>(
+          new Rat43CostFunctor(x, y)));
+
+  while (state.KeepRunning()) {
+    cost_function->Evaluate(
+        parameters, &residuals, state.range(0) ? jacobians : nullptr);
+  }
+}
+BENCHMARK(BM_Rat43AutoDiff)->Arg(0)->Arg(1);
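+// A benchmark for the hand-rolled Jet version above (hypothetical name
+// BM_Rat43AutomaticDiff) would be registered the same way, constructing
+// Rat43Automatic instead of AutoDiffCostFunction and calling Evaluate() in
+// the loop:
+//
+//   BENCHMARK(BM_Rat43AutomaticDiff)->Arg(0)->Arg(1);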
+
+}  // namespace ceres
+
+BENCHMARK_MAIN();
diff --git a/internal/ceres/autodiff_cost_function_test.cc b/internal/ceres/autodiff_cost_function_test.cc
new file mode 100644
index 0000000..4795579
--- /dev/null
+++ b/internal/ceres/autodiff_cost_function_test.cc
@@ -0,0 +1,175 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/autodiff_cost_function.h"
+
+#include <cstddef>
+#include <memory>
+
+#include "gtest/gtest.h"
+#include "ceres/cost_function.h"
+#include "ceres/array_utils.h"
+
+namespace ceres {
+namespace internal {
+
+class BinaryScalarCost {
+ public:
+  explicit BinaryScalarCost(double a): a_(a) {}
+  template <typename T>
+  bool operator()(const T* const x, const T* const y,
+                  T* cost) const {
+    cost[0] = x[0] * y[0] + x[1] * y[1]  - T(a_);
+    return true;
+  }
+ private:
+  double a_;
+};
+
+TEST(AutodiffCostFunction, BilinearDifferentiationTest) {
+  CostFunction* cost_function  =
+    new AutoDiffCostFunction<BinaryScalarCost, 1, 2, 2>(
+        new BinaryScalarCost(1.0));
+
+  double** parameters = new double*[2];
+  parameters[0] = new double[2];
+  parameters[1] = new double[2];
+
+  parameters[0][0] = 1;
+  parameters[0][1] = 2;
+
+  parameters[1][0] = 3;
+  parameters[1][1] = 4;
+
+  double** jacobians = new double*[2];
+  jacobians[0] = new double[2];
+  jacobians[1] = new double[2];
+
+  double residuals = 0.0;
+
+  cost_function->Evaluate(parameters, &residuals, NULL);
+  EXPECT_EQ(10.0, residuals);
+
+  cost_function->Evaluate(parameters, &residuals, jacobians);
+  EXPECT_EQ(10.0, residuals);
+
+  EXPECT_EQ(3, jacobians[0][0]);
+  EXPECT_EQ(4, jacobians[0][1]);
+  EXPECT_EQ(1, jacobians[1][0]);
+  EXPECT_EQ(2, jacobians[1][1]);
+
+  delete[] jacobians[0];
+  delete[] jacobians[1];
+  delete[] parameters[0];
+  delete[] parameters[1];
+  delete[] jacobians;
+  delete[] parameters;
+  delete cost_function;
+}
+
+struct TenParameterCost {
+  template <typename T>
+  bool operator()(const T* const x0,
+                  const T* const x1,
+                  const T* const x2,
+                  const T* const x3,
+                  const T* const x4,
+                  const T* const x5,
+                  const T* const x6,
+                  const T* const x7,
+                  const T* const x8,
+                  const T* const x9,
+                  T* cost) const {
+    cost[0] = *x0 + *x1 + *x2 + *x3 + *x4 + *x5 + *x6 + *x7 + *x8 + *x9;
+    return true;
+  }
+};
+
+TEST(AutodiffCostFunction, ManyParameterAutodiffInstantiates) {
+  CostFunction* cost_function  =
+      new AutoDiffCostFunction<
+          TenParameterCost, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1>(
+              new TenParameterCost);
+
+  double** parameters = new double*[10];
+  double** jacobians = new double*[10];
+  for (int i = 0; i < 10; ++i) {
+    parameters[i] = new double[1];
+    parameters[i][0] = i;
+    jacobians[i] = new double[1];
+  }
+
+  double residuals = 0.0;
+
+  cost_function->Evaluate(parameters, &residuals, NULL);
+  EXPECT_EQ(45.0, residuals);
+
+  cost_function->Evaluate(parameters, &residuals, jacobians);
+  EXPECT_EQ(residuals, 45.0);
+  for (int i = 0; i < 10; ++i) {
+    EXPECT_EQ(1.0, jacobians[i][0]);
+  }
+
+  for (int i = 0; i < 10; ++i) {
+    delete[] jacobians[i];
+    delete[] parameters[i];
+  }
+  delete[] jacobians;
+  delete[] parameters;
+  delete cost_function;
+}
+
+struct OnlyFillsOneOutputFunctor {
+  template <typename T>
+  bool operator()(const T* x, T* output) const {
+    output[0] = x[0];
+    return true;
+  }
+};
+
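+// The functor above fills only output[0] of a two-dimensional residual. The
+// test below first poisons the output arrays with InvalidateArray and then
+// uses IsArrayValid to detect that at least one entry was never written by
+// the cost function.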
+TEST(AutoDiffCostFunction, PartiallyFilledResidualShouldFailEvaluation) {
+  double parameter = 1.0;
+  double jacobian[2];
+  double residuals[2];
+  double* parameters[] = {&parameter};
+  double* jacobians[] = {jacobian};
+
+  std::unique_ptr<CostFunction> cost_function(
+      new AutoDiffCostFunction<OnlyFillsOneOutputFunctor, 2, 1>(
+          new OnlyFillsOneOutputFunctor));
+  InvalidateArray(2, jacobian);
+  InvalidateArray(2, residuals);
+  EXPECT_TRUE(cost_function->Evaluate(parameters, residuals, jacobians));
+  EXPECT_FALSE(IsArrayValid(2, jacobian));
+  EXPECT_FALSE(IsArrayValid(2, residuals));
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/autodiff_local_parameterization_test.cc b/internal/ceres/autodiff_local_parameterization_test.cc
new file mode 100644
index 0000000..f2396dc
--- /dev/null
+++ b/internal/ceres/autodiff_local_parameterization_test.cc
@@ -0,0 +1,223 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <cmath>
+#include "ceres/autodiff_local_parameterization.h"
+#include "ceres/local_parameterization.h"
+#include "ceres/rotation.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+struct IdentityPlus {
+  template <typename T>
+  bool operator()(const T* x, const T* delta, T* x_plus_delta) const {
+    for (int i = 0; i < 3; ++i) {
+      x_plus_delta[i] = x[i] + delta[i];
+    }
+    return true;
+  }
+};
+
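+// With Plus(x, delta) = x + delta, the expected x_plus_delta is (1, 3, 5) and
+// the Jacobian d(x + delta)/d(delta) at delta = 0 is the 3x3 identity.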
+TEST(AutoDiffLocalParameterizationTest, IdentityParameterization) {
+  AutoDiffLocalParameterization<IdentityPlus, 3, 3>
+      parameterization;
+
+  double x[3] = {1.0, 2.0, 3.0};
+  double delta[3] = {0.0, 1.0, 2.0};
+  double x_plus_delta[3] = {0.0, 0.0, 0.0};
+  parameterization.Plus(x, delta, x_plus_delta);
+
+  EXPECT_EQ(x_plus_delta[0], 1.0);
+  EXPECT_EQ(x_plus_delta[1], 3.0);
+  EXPECT_EQ(x_plus_delta[2], 5.0);
+
+  double jacobian[9];
+  parameterization.ComputeJacobian(x, jacobian);
+  int k = 0;
+  for (int i = 0; i < 3; ++i) {
+    for (int j = 0; j < 3; ++j, ++k) {
+      EXPECT_EQ(jacobian[k], (i == j) ? 1.0 : 0.0);
+    }
+  }
+}
+
+struct ScaledPlus {
+  explicit ScaledPlus(const double &scale_factor)
+     : scale_factor_(scale_factor)
+  {}
+
+  template <typename T>
+  bool operator()(const T* x, const T* delta, T* x_plus_delta) const {
+    for (int i = 0; i < 3; ++i) {
+      x_plus_delta[i] = x[i] + T(scale_factor_) * delta[i];
+    }
+    return true;
+  }
+
+  const double scale_factor_;
+};
+
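+// With Plus(x, delta) = x + s * delta and s = 1.2345, the expected result is
+// (1.0, 2.0 + 1.2345, 3.0 + 2 * 1.2345) = (1.0, 3.2345, 5.469) and the
+// Jacobian is s times the 3x3 identity.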
+TEST(AutoDiffLocalParameterizationTest, ScaledParameterization) {
+  const double kTolerance = 1e-14;
+
+  AutoDiffLocalParameterization<ScaledPlus, 3, 3>
+      parameterization(new ScaledPlus(1.2345));
+
+  double x[3] = {1.0, 2.0, 3.0};
+  double delta[3] = {0.0, 1.0, 2.0};
+  double x_plus_delta[3] = {0.0, 0.0, 0.0};
+  parameterization.Plus(x, delta, x_plus_delta);
+
+  EXPECT_NEAR(x_plus_delta[0], 1.0, kTolerance);
+  EXPECT_NEAR(x_plus_delta[1], 3.2345, kTolerance);
+  EXPECT_NEAR(x_plus_delta[2], 5.469, kTolerance);
+
+  double jacobian[9];
+  parameterization.ComputeJacobian(x, jacobian);
+  int k = 0;
+  for (int i = 0; i < 3; ++i) {
+    for (int j = 0; j < 3; ++j, ++k) {
+      EXPECT_NEAR(jacobian[k], (i == j) ? 1.2345 : 0.0, kTolerance);
+    }
+  }
+}
+
+struct QuaternionPlus {
+  template<typename T>
+  bool operator()(const T* x, const T* delta, T* x_plus_delta) const {
+    const T squared_norm_delta =
+        delta[0] * delta[0] + delta[1] * delta[1] + delta[2] * delta[2];
+
+    T q_delta[4];
+    if (squared_norm_delta > T(0.0)) {
+      T norm_delta = sqrt(squared_norm_delta);
+      const T sin_delta_by_delta = sin(norm_delta) / norm_delta;
+      q_delta[0] = cos(norm_delta);
+      q_delta[1] = sin_delta_by_delta * delta[0];
+      q_delta[2] = sin_delta_by_delta * delta[1];
+      q_delta[3] = sin_delta_by_delta * delta[2];
+    } else {
+      // We do not just use q_delta = [1, 0, 0, 0] here because it is a
+      // constant, and a constant has a zero derivative under automatic
+      // differentiation. Instead we take the first order approximation and
+      // evaluate it at zero.
+      q_delta[0] = T(1.0);
+      q_delta[1] = delta[0];
+      q_delta[2] = delta[1];
+      q_delta[3] = delta[2];
+    }
+
+    QuaternionProduct(q_delta, x, x_plus_delta);
+    return true;
+  }
+};
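+// Note on the small-delta branch above: as |delta| -> 0, cos(|delta|) -> 1 and
+// sin(|delta|) / |delta| -> 1, so the first order form agrees with the exact
+// expression while keeping d(q_delta)/d(delta) nonzero under automatic
+// differentiation, which the constant [1, 0, 0, 0] would not.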
+
+void QuaternionParameterizationTestHelper(const double* x,
+                                          const double* delta) {
+  const double kTolerance = 1e-14;
+  double x_plus_delta_ref[4] = {0.0, 0.0, 0.0, 0.0};
+  double jacobian_ref[12];
+
+  QuaternionParameterization ref_parameterization;
+  ref_parameterization.Plus(x, delta, x_plus_delta_ref);
+  ref_parameterization.ComputeJacobian(x, jacobian_ref);
+
+  double x_plus_delta[4] = {0.0, 0.0, 0.0, 0.0};
+  double jacobian[12];
+  AutoDiffLocalParameterization<QuaternionPlus, 4, 3> parameterization;
+  parameterization.Plus(x, delta, x_plus_delta);
+  parameterization.ComputeJacobian(x, jacobian);
+
+  for (int i = 0; i < 4; ++i) {
+    EXPECT_NEAR(x_plus_delta[i], x_plus_delta_ref[i], kTolerance);
+  }
+
+  const double x_plus_delta_norm =
+      sqrt(x_plus_delta[0] * x_plus_delta[0] +
+           x_plus_delta[1] * x_plus_delta[1] +
+           x_plus_delta[2] * x_plus_delta[2] +
+           x_plus_delta[3] * x_plus_delta[3]);
+
+  EXPECT_NEAR(x_plus_delta_norm, 1.0, kTolerance);
+
+  for (int i = 0; i < 12; ++i) {
+    EXPECT_TRUE(std::isfinite(jacobian[i]));
+    EXPECT_NEAR(jacobian[i], jacobian_ref[i], kTolerance)
+        << "Jacobian mismatch: i = " << i
+        << "\n Expected \n" << ConstMatrixRef(jacobian_ref, 4, 3)
+        << "\n Actual \n" << ConstMatrixRef(jacobian, 4, 3);
+  }
+}
+
+TEST(AutoDiffLocalParameterization, QuaternionParameterizationZeroTest) {
+  double x[4] = {0.5, 0.5, 0.5, 0.5};
+  double delta[3] = {0.0, 0.0, 0.0};
+  QuaternionParameterizationTestHelper(x, delta);
+}
+
+
+TEST(AutoDiffLocalParameterization, QuaternionParameterizationNearZeroTest) {
+  double x[4] = {0.52, 0.25, 0.15, 0.45};
+  double norm_x = sqrt(x[0] * x[0] +
+                       x[1] * x[1] +
+                       x[2] * x[2] +
+                       x[3] * x[3]);
+  for (int i = 0; i < 4; ++i) {
+    x[i] = x[i] / norm_x;
+  }
+
+  double delta[3] = {0.24, 0.15, 0.10};
+  for (int i = 0; i < 3; ++i) {
+    delta[i] = delta[i] * 1e-14;
+  }
+
+  QuaternionParameterizationTestHelper(x, delta);
+}
+
+TEST(AutoDiffLocalParameterization, QuaternionParameterizationNonZeroTest) {
+  double x[4] = {0.52, 0.25, 0.15, 0.45};
+  double norm_x = sqrt(x[0] * x[0] +
+                       x[1] * x[1] +
+                       x[2] * x[2] +
+                       x[3] * x[3]);
+
+  for (int i = 0; i < 4; ++i) {
+    x[i] = x[i] / norm_x;
+  }
+
+  double delta[3] = {0.24, 0.15, 0.10};
+  QuaternionParameterizationTestHelper(x, delta);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/autodiff_test.cc b/internal/ceres/autodiff_test.cc
new file mode 100644
index 0000000..04a77ea
--- /dev/null
+++ b/internal/ceres/autodiff_test.cc
@@ -0,0 +1,667 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include "ceres/internal/autodiff.h"
+
+#include "gtest/gtest.h"
+#include "ceres/random.h"
+
+namespace ceres {
+namespace internal {
+
+template <typename T> inline
+T &RowMajorAccess(T *base, int rows, int cols, int i, int j) {
+  return base[cols * i + j];
+}
+
+// Do (symmetric) finite differencing using the given function object 'b' of
+// type 'B' and scalar type 'T' with step size 'del'.
+//
+// The type B should have a signature
+//
+//   bool operator()(T const *, T *) const;
+//
+// which maps a vector of parameters to a vector of outputs.
+template <typename B, typename T, int M, int N> inline
+bool SymmetricDiff(const B& b,
+                   const T par[N],
+                   T del,           // step size.
+                   T fun[M],
+                   T jac[M * N]) {  // row-major.
+  if (!b(par, fun)) {
+    return false;
+  }
+
+  // Temporary parameter vector.
+  T tmp_par[N];
+  for (int j = 0; j < N; ++j) {
+    tmp_par[j] = par[j];
+  }
+
+  // For each dimension, we do one forward step and one backward step in
+  // parameter space, and store the resulting output vectors in fwd_fun and
+  // bwd_fun.
+  T fwd_fun[M];
+  T bwd_fun[M];
+
+  for (int j = 0; j < N; ++j) {
+    // Forward step.
+    tmp_par[j] = par[j] + del;
+    if (!b(tmp_par, fwd_fun)) {
+      return false;
+    }
+
+    // Backward step.
+    tmp_par[j] = par[j] - del;
+    if (!b(tmp_par, bwd_fun)) {
+      return false;
+    }
+
+    // Symmetric differencing:
+    //   f'(a) = (f(a + h) - f(a - h)) / (2 h)
+    for (int i = 0; i < M; ++i) {
+      RowMajorAccess(jac, M, N, i, j) =
+          (fwd_fun[i] - bwd_fun[i]) / (T(2) * del);
+    }
+
+    // Restore our temporary vector.
+    tmp_par[j] = par[j];
+  }
+
+  return true;
+}
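+
+// Illustrative check of the central difference above: for f(x) = x * x at
+// x = 3 with del = 1e-4, ((3 + del)^2 - (3 - del)^2) / (2 * del) = 6 up to
+// rounding, since the symmetric difference of a quadratic has no truncation
+// error.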
+
+template <typename A> inline
+void QuaternionToScaledRotation(A const q[4], A R[3 * 3]) {
+  // Make convenient names for elements of q.
+  A a = q[0];
+  A b = q[1];
+  A c = q[2];
+  A d = q[3];
+  // This is not to eliminate common sub-expressions, but to
+  // make the lines shorter so that they fit in 80 columns!
+  A aa = a*a;
+  A ab = a*b;
+  A ac = a*c;
+  A ad = a*d;
+  A bb = b*b;
+  A bc = b*c;
+  A bd = b*d;
+  A cc = c*c;
+  A cd = c*d;
+  A dd = d*d;
+#define R(i, j) RowMajorAccess(R, 3, 3, (i), (j))
+  R(0, 0) =  aa+bb-cc-dd; R(0, 1) = A(2)*(bc-ad); R(0, 2) = A(2)*(ac+bd);  // NOLINT
+  R(1, 0) = A(2)*(ad+bc); R(1, 1) =  aa-bb+cc-dd; R(1, 2) = A(2)*(cd-ab);  // NOLINT
+  R(2, 0) = A(2)*(bd-ac); R(2, 1) = A(2)*(ab+cd); R(2, 2) =  aa-bb-cc+dd;  // NOLINT
+#undef R
+}
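+
+// For a unit quaternion (aa + bb + cc + dd = 1) the matrix above is the usual
+// rotation matrix, e.g. R(0, 0) = aa + bb - cc - dd = 1 - 2 * (cc + dd); for a
+// general quaternion the result is scaled by the squared norm of q.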
+
+// A structure for projecting a 3x4 camera matrix and a
+// homogeneous 3D point, to a 2D inhomogeneous point.
+struct Projective {
+  // Function that takes P and X as separate vectors:
+  //   P, X -> x
+  template <typename A>
+  bool operator()(A const P[12], A const X[4], A x[2]) const {
+    A PX[3];
+    for (int i = 0; i < 3; ++i) {
+      PX[i] = RowMajorAccess(P, 3, 4, i, 0) * X[0] +
+              RowMajorAccess(P, 3, 4, i, 1) * X[1] +
+              RowMajorAccess(P, 3, 4, i, 2) * X[2] +
+              RowMajorAccess(P, 3, 4, i, 3) * X[3];
+    }
+    if (PX[2] != 0.0) {
+      x[0] = PX[0] / PX[2];
+      x[1] = PX[1] / PX[2];
+      return true;
+    }
+    return false;
+  }
+
+  // Version that takes P and X packed in one vector:
+  //
+  //   (P, X) -> x
+  //
+  template <typename A>
+  bool operator()(A const P_X[12 + 4], A x[2]) const {
+    return operator()(P_X + 0, P_X + 12, x);
+  }
+};
+
+// Test the projective camera model projector.
+TEST(AutoDiff, ProjectiveCameraModel) {
+  srand(5);
+  double const tol = 1e-10;  // floating-point tolerance.
+  double const del = 1e-4;   // finite-difference step.
+  double const err = 1e-6;   // finite-difference tolerance.
+
+  Projective b;
+
+  // Make random P and X, in a single vector.
+  double PX[12 + 4];
+  for (int i = 0; i < 12 + 4; ++i) {
+    PX[i] = RandDouble();
+  }
+
+  // Handy names for the P and X parts.
+  double *P = PX + 0;
+  double *X = PX + 12;
+
+  // Apply the mapping, to get image point b_x.
+  double b_x[2];
+  b(P, X, b_x);
+
+  // Use finite differencing to estimate the Jacobian.
+  double fd_x[2];
+  double fd_J[2 * (12 + 4)];
+  ASSERT_TRUE((SymmetricDiff<Projective, double, 2, 12 + 4>(b, PX, del,
+                                                            fd_x, fd_J)));
+
+  for (int i = 0; i < 2; ++i) {
+    ASSERT_NEAR(fd_x[i], b_x[i], tol);
+  }
+
+  // Use automatic differentiation to compute the Jacobian.
+  double ad_x1[2];
+  double J_PX[2 * (12 + 4)];
+  {
+    double *parameters[] = { PX };
+    double *jacobians[] = { J_PX };
+    ASSERT_TRUE((AutoDifferentiate<StaticParameterDims<12 + 4>>(
+        b, parameters, 2, ad_x1, jacobians)));
+
+    for (int i = 0; i < 2; ++i) {
+      ASSERT_NEAR(ad_x1[i], b_x[i], tol);
+    }
+  }
+
+  // Use automatic differentiation (again), with two arguments.
+  {
+    double ad_x2[2];
+    double J_P[2 * 12];
+    double J_X[2 * 4];
+    double *parameters[] = { P, X };
+    double *jacobians[] = { J_P, J_X };
+    ASSERT_TRUE((AutoDifferentiate<StaticParameterDims<12, 4>>(
+        b, parameters, 2, ad_x2, jacobians)));
+
+    for (int i = 0; i < 2; ++i) {
+      ASSERT_NEAR(ad_x2[i], b_x[i], tol);
+    }
+
+    // Now compare the jacobians we got.
+    for (int i = 0; i < 2; ++i) {
+      for (int j = 0; j < 12 + 4; ++j) {
+        ASSERT_NEAR(J_PX[(12 + 4) * i + j], fd_J[(12 + 4) * i + j], err);
+      }
+
+      for (int j = 0; j < 12; ++j) {
+        ASSERT_NEAR(J_PX[(12 + 4) * i + j], J_P[12 * i + j], tol);
+      }
+      for (int j = 0; j < 4; ++j) {
+        ASSERT_NEAR(J_PX[(12 + 4) * i + 12 + j], J_X[4 * i + j], tol);
+      }
+    }
+  }
+}
+
+// Object to implement the projection by a calibrated camera.
+struct Metric {
+  // The mapping is
+  //
+  //   q, c, X -> x = dehomog(R(q) (X - c))
+  //
+  // where q is a quaternion and c is the center of projection.
+  //
+  // This function takes three input vectors.
+  template <typename A>
+  bool operator()(A const q[4], A const c[3], A const X[3], A x[2]) const {
+    A R[3 * 3];
+    QuaternionToScaledRotation(q, R);
+
+    // Convert the quaternion mapping all the way to a projective matrix.
+    A P[3 * 4];
+
+    // Set P(:, 1:3) = R
+    for (int i = 0; i < 3; ++i) {
+      for (int j = 0; j < 3; ++j) {
+        RowMajorAccess(P, 3, 4, i, j) = RowMajorAccess(R, 3, 3, i, j);
+      }
+    }
+
+    // Set P(:, 4) = - R c
+    for (int i = 0; i < 3; ++i) {
+      RowMajorAccess(P, 3, 4, i, 3) =
+        - (RowMajorAccess(R, 3, 3, i, 0) * c[0] +
+           RowMajorAccess(R, 3, 3, i, 1) * c[1] +
+           RowMajorAccess(R, 3, 3, i, 2) * c[2]);
+    }
+
+    A X1[4] = { X[0], X[1], X[2], A(1) };
+    Projective p;
+    return p(P, X1, x);
+  }
+
+  // A version that takes a single vector.
+  template <typename A>
+  bool operator()(A const q_c_X[4 + 3 + 3], A x[2]) const {
+    return operator()(q_c_X, q_c_X + 4, q_c_X + 4 + 3, x);
+  }
+};
+
+// This test is similar in structure to the previous one.
+TEST(AutoDiff, Metric) {
+  srand(5);
+  double const tol = 1e-10;  // floating-point tolerance.
+  double const del = 1e-4;   // finite-difference step.
+  double const err = 1e-5;   // finite-difference tolerance.
+
+  Metric b;
+
+  // Make random parameter vector.
+  double qcX[4 + 3 + 3];
+  for (int i = 0; i < 4 + 3 + 3; ++i)
+    qcX[i] = RandDouble();
+
+  // Handy names.
+  double *q = qcX;
+  double *c = qcX + 4;
+  double *X = qcX + 4 + 3;
+
+  // Compute projection, b_x.
+  double b_x[2];
+  ASSERT_TRUE(b(q, c, X, b_x));
+
+  // Finite differencing estimate of Jacobian.
+  double fd_x[2];
+  double fd_J[2 * (4 + 3 + 3)];
+  ASSERT_TRUE((SymmetricDiff<Metric, double, 2, 4 + 3 + 3>(b, qcX, del,
+                                                           fd_x, fd_J)));
+
+  for (int i = 0; i < 2; ++i) {
+    ASSERT_NEAR(fd_x[i], b_x[i], tol);
+  }
+
+  // Automatic differentiation.
+  double ad_x[2];
+  double J_q[2 * 4];
+  double J_c[2 * 3];
+  double J_X[2 * 3];
+  double *parameters[] = { q, c, X };
+  double *jacobians[] = { J_q, J_c, J_X };
+  ASSERT_TRUE((AutoDifferentiate<StaticParameterDims<4, 3, 3>>(
+      b, parameters, 2, ad_x, jacobians)));
+
+  for (int i = 0; i < 2; ++i) {
+    ASSERT_NEAR(ad_x[i], b_x[i], tol);
+  }
+
+  // Compare the pieces.
+  for (int i = 0; i < 2; ++i) {
+    for (int j = 0; j < 4; ++j) {
+      ASSERT_NEAR(J_q[4 * i + j], fd_J[(4 + 3 + 3) * i + j], err);
+    }
+    for (int j = 0; j < 3; ++j) {
+      ASSERT_NEAR(J_c[3 * i + j], fd_J[(4 + 3 + 3) * i + j + 4], err);
+    }
+    for (int j = 0; j < 3; ++j) {
+      ASSERT_NEAR(J_X[3 * i + j], fd_J[(4 + 3 + 3) * i + j + 4 + 3], err);
+    }
+  }
+}
+
+struct VaryingResidualFunctor {
+  template <typename T>
+  bool operator()(const T x[2], T* y) const {
+    for (int i = 0; i < num_residuals; ++i) {
+      y[i] = T(i) * x[0] * x[1] * x[1];
+    }
+    return true;
+  }
+
+  int num_residuals;
+};
+
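+// For y_i = i * x0 * x1^2, the derivatives are dy_i/dx0 = i * x1^2 and
+// dy_i/dx1 = 2 * i * x0 * x1, which the loop below checks at x = (1.0, 5.5)
+// for each residual count.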
+TEST(AutoDiff, VaryingNumberOfResidualsForOneCostFunctorType) {
+  double x[2] = { 1.0, 5.5 };
+  double *parameters[] = { x };
+  const int kMaxResiduals = 10;
+  double J_x[2 * kMaxResiduals];
+  double residuals[kMaxResiduals];
+  double *jacobians[] = { J_x };
+
+  // Use a single functor, but tweak it to produce different numbers of
+  // residuals.
+  VaryingResidualFunctor functor;
+
+  for (int num_residuals = 1; num_residuals < kMaxResiduals; ++num_residuals) {
+    // Tweak the number of residuals to produce.
+    functor.num_residuals = num_residuals;
+
+    // Run autodiff with the new number of residuals.
+    ASSERT_TRUE((AutoDifferentiate<StaticParameterDims<2>>(
+        functor, parameters, num_residuals, residuals, jacobians)));
+
+    const double kTolerance = 1e-14;
+    for (int i = 0; i < num_residuals; ++i) {
+      EXPECT_NEAR(J_x[2 * i + 0], i * x[1] * x[1], kTolerance) << "i: " << i;
+      EXPECT_NEAR(J_x[2 * i + 1], 2 * i * x[0] * x[1], kTolerance)
+          << "i: " << i;
+    }
+  }
+}
+
+struct Residual1Param {
+  template <typename T>
+  bool operator()(const T* x0, T* y) const {
+    y[0] = *x0;
+    return true;
+  }
+};
+
+struct Residual2Param {
+  template <typename T>
+  bool operator()(const T* x0, const T* x1, T* y) const {
+    y[0] = *x0 + pow(*x1, 2);
+    return true;
+  }
+};
+
+struct Residual3Param {
+  template <typename T>
+  bool operator()(const T* x0, const T* x1, const T* x2, T* y) const {
+    y[0] = *x0 + pow(*x1, 2) + pow(*x2, 3);
+    return true;
+  }
+};
+
+struct Residual4Param {
+  template <typename T>
+  bool operator()(const T* x0,
+                  const T* x1,
+                  const T* x2,
+                  const T* x3,
+                  T* y) const {
+    y[0] = *x0 + pow(*x1, 2) + pow(*x2, 3) + pow(*x3, 4);
+    return true;
+  }
+};
+
+struct Residual5Param {
+  template <typename T>
+  bool operator()(const T* x0,
+                  const T* x1,
+                  const T* x2,
+                  const T* x3,
+                  const T* x4,
+                  T* y) const {
+    y[0] = *x0 + pow(*x1, 2) + pow(*x2, 3) + pow(*x3, 4) + pow(*x4, 5);
+    return true;
+  }
+};
+
+struct Residual6Param {
+  template <typename T>
+  bool operator()(const T* x0,
+                  const T* x1,
+                  const T* x2,
+                  const T* x3,
+                  const T* x4,
+                  const T* x5,
+                  T* y) const {
+    y[0] = *x0 + pow(*x1, 2) + pow(*x2, 3) + pow(*x3, 4) + pow(*x4, 5) +
+        pow(*x5, 6);
+    return true;
+  }
+};
+
+struct Residual7Param {
+  template <typename T>
+  bool operator()(const T* x0,
+                  const T* x1,
+                  const T* x2,
+                  const T* x3,
+                  const T* x4,
+                  const T* x5,
+                  const T* x6,
+                  T* y) const {
+    y[0] = *x0 + pow(*x1, 2) + pow(*x2, 3) + pow(*x3, 4) + pow(*x4, 5) +
+        pow(*x5, 6) + pow(*x6, 7);
+    return true;
+  }
+};
+
+struct Residual8Param {
+  template <typename T>
+  bool operator()(const T* x0,
+                  const T* x1,
+                  const T* x2,
+                  const T* x3,
+                  const T* x4,
+                  const T* x5,
+                  const T* x6,
+                  const T* x7,
+                  T* y) const {
+    y[0] = *x0 + pow(*x1, 2) + pow(*x2, 3) + pow(*x3, 4) + pow(*x4, 5) +
+        pow(*x5, 6) + pow(*x6, 7) + pow(*x7, 8);
+    return true;
+  }
+};
+
+struct Residual9Param {
+  template <typename T>
+  bool operator()(const T* x0,
+                  const T* x1,
+                  const T* x2,
+                  const T* x3,
+                  const T* x4,
+                  const T* x5,
+                  const T* x6,
+                  const T* x7,
+                  const T* x8,
+                  T* y) const {
+    y[0] = *x0 + pow(*x1, 2) + pow(*x2, 3) + pow(*x3, 4) + pow(*x4, 5) +
+        pow(*x5, 6) + pow(*x6, 7) + pow(*x7, 8) + pow(*x8, 9);
+    return true;
+  }
+};
+
+struct Residual10Param {
+  template <typename T>
+  bool operator()(const T* x0,
+                  const T* x1,
+                  const T* x2,
+                  const T* x3,
+                  const T* x4,
+                  const T* x5,
+                  const T* x6,
+                  const T* x7,
+                  const T* x8,
+                  const T* x9,
+                  T* y) const {
+    y[0] = *x0 + pow(*x1, 2) + pow(*x2, 3) + pow(*x3, 4) + pow(*x4, 5) +
+        pow(*x5, 6) + pow(*x6, 7) + pow(*x7, 8) + pow(*x8, 9) + pow(*x9, 10);
+    return true;
+  }
+};
+
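+// Each ResidualNParam functor above computes y = sum_{k=1}^{N} x_{k-1}^k. With
+// every parameter equal to 2 the residual is 2 + 4 + ... + 2^N = 2^(N+1) - 2,
+// and the derivative with respect to x_i is (i + 1) * 2^i, matching the
+// expectations in each block below.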
+TEST(AutoDiff, VariadicAutoDiff) {
+  double x[10];
+  double residual = 0;
+  double* parameters[10];
+  double jacobian_values[10];
+  double* jacobians[10];
+
+  for (int i = 0; i < 10; ++i) {
+    x[i] = 2.0;
+    parameters[i] = x + i;
+    jacobians[i] = jacobian_values + i;
+  }
+
+  {
+    Residual1Param functor;
+    int num_variables = 1;
+    EXPECT_TRUE((AutoDifferentiate<StaticParameterDims<1>>(
+        functor, parameters, 1, &residual, jacobians)));
+    EXPECT_EQ(residual, pow(2, num_variables + 1) - 2);
+    for (int i = 0; i < num_variables; ++i) {
+      EXPECT_EQ(jacobian_values[i], (i + 1) * pow(2, i));
+    }
+  }
+
+  {
+    Residual2Param functor;
+    int num_variables = 2;
+    EXPECT_TRUE((AutoDifferentiate<StaticParameterDims<1, 1>>(
+        functor, parameters, 1, &residual, jacobians)));
+    EXPECT_EQ(residual, pow(2, num_variables + 1) - 2);
+    for (int i = 0; i < num_variables; ++i) {
+      EXPECT_EQ(jacobian_values[i], (i + 1) * pow(2, i));
+    }
+  }
+
+  {
+    Residual3Param functor;
+    int num_variables = 3;
+    EXPECT_TRUE((AutoDifferentiate<StaticParameterDims<1, 1, 1>>(
+        functor, parameters, 1, &residual, jacobians)));
+    EXPECT_EQ(residual, pow(2, num_variables + 1) - 2);
+    for (int i = 0; i < num_variables; ++i) {
+      EXPECT_EQ(jacobian_values[i], (i + 1) * pow(2, i));
+    }
+  }
+
+  {
+    Residual4Param functor;
+    int num_variables = 4;
+    EXPECT_TRUE((AutoDifferentiate<StaticParameterDims<1, 1, 1, 1>>(
+        functor, parameters, 1, &residual, jacobians)));
+    EXPECT_EQ(residual, pow(2, num_variables + 1) - 2);
+    for (int i = 0; i < num_variables; ++i) {
+      EXPECT_EQ(jacobian_values[i], (i + 1) * pow(2, i));
+    }
+  }
+
+  {
+    Residual5Param functor;
+    int num_variables = 5;
+    EXPECT_TRUE((AutoDifferentiate<StaticParameterDims<1, 1, 1, 1, 1>>(
+        functor, parameters, 1, &residual, jacobians)));
+    EXPECT_EQ(residual, pow(2, num_variables + 1) - 2);
+    for (int i = 0; i < num_variables; ++i) {
+      EXPECT_EQ(jacobian_values[i], (i + 1) * pow(2, i));
+    }
+  }
+
+  {
+    Residual6Param functor;
+    int num_variables = 6;
+    EXPECT_TRUE((AutoDifferentiate<StaticParameterDims<1, 1, 1, 1, 1, 1>>(
+        functor, parameters, 1, &residual, jacobians)));
+    EXPECT_EQ(residual, pow(2, num_variables + 1) - 2);
+    for (int i = 0; i < num_variables; ++i) {
+      EXPECT_EQ(jacobian_values[i], (i + 1) * pow(2, i));
+    }
+  }
+
+  {
+    Residual7Param functor;
+    int num_variables = 7;
+    EXPECT_TRUE((AutoDifferentiate<StaticParameterDims<1, 1, 1, 1, 1, 1, 1>>(
+        functor, parameters, 1, &residual, jacobians)));
+    EXPECT_EQ(residual, pow(2, num_variables + 1) - 2);
+    for (int i = 0; i < num_variables; ++i) {
+      EXPECT_EQ(jacobian_values[i], (i + 1) * pow(2, i));
+    }
+  }
+
+  {
+    Residual8Param functor;
+    int num_variables = 8;
+    EXPECT_TRUE((AutoDifferentiate<StaticParameterDims<1, 1, 1, 1, 1, 1, 1, 1>>(
+        functor, parameters, 1, &residual, jacobians)));
+    EXPECT_EQ(residual, pow(2, num_variables + 1) - 2);
+    for (int i = 0; i < num_variables; ++i) {
+      EXPECT_EQ(jacobian_values[i], (i + 1) * pow(2, i));
+    }
+  }
+
+  {
+    Residual9Param functor;
+    int num_variables = 9;
+    EXPECT_TRUE(
+        (AutoDifferentiate<StaticParameterDims<1, 1, 1, 1, 1, 1, 1, 1, 1>>(
+            functor, parameters, 1, &residual, jacobians)));
+    EXPECT_EQ(residual, pow(2, num_variables + 1) - 2);
+    for (int i = 0; i < num_variables; ++i) {
+      EXPECT_EQ(jacobian_values[i], (i + 1) * pow(2, i));
+    }
+  }
+
+  {
+    Residual10Param functor;
+    int num_variables = 10;
+    EXPECT_TRUE(
+        (AutoDifferentiate<StaticParameterDims<1, 1, 1, 1, 1, 1, 1, 1, 1, 1>>(
+            functor, parameters, 1, &residual, jacobians)));
+    EXPECT_EQ(residual, pow(2, num_variables + 1) - 2);
+    for (int i = 0; i < num_variables; ++i) {
+      EXPECT_EQ(jacobian_values[i], (i + 1) * pow(2, i));
+    }
+  }
+}
+
+// This is a fragile test that triggers the alignment bug on
+// i686-apple-darwin10-llvm-g++-4.2 (GCC) 4.2.1. It is quite possible that
+// other combinations of operating system and compiler will re-arrange the
+// operations in this test.
+//
+// But this is the best (and only) way we know of to trigger this
+// problem for now. A more robust solution that guarantees the
+// alignment of Eigen types used for automatic differentiation would
+// be nice.
+TEST(AutoDiff, AlignedAllocationTest) {
+  // This char is needed to allocate a single byte on the stack, so that the
+  // next allocation is not aligned by default.
+  char y = 0;
+
+  // This is needed to prevent the compiler from optimizing y out of
+  // this function.
+  y += 1;
+
+  typedef Jet<double, 2> JetT;
+  FixedArray<JetT, (256 * 7) / sizeof(JetT)> x(3);
+
+  // Need this to make sure that x does not get optimized out.
+  x[0] = x[0] + JetT(1.0);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/benchmarks/macbook-pro-2014-small_blas_gemm_benchmark.json b/internal/ceres/benchmarks/macbook-pro-2014-small_blas_gemm_benchmark.json
new file mode 100644
index 0000000..e24572d
--- /dev/null
+++ b/internal/ceres/benchmarks/macbook-pro-2014-small_blas_gemm_benchmark.json
@@ -0,0 +1,3833 @@
+{
+  "context": {
+    "date": "2018-03-23 13:15:00",
+    "num_cpus": 8,
+    "mhz_per_cpu": 2200,
+    "cpu_scaling_enabled": false,
+    "library_build_type": "release"
+  },
+  "benchmarks": [
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/1/1",
+      "iterations": 70805770,
+      "real_time": 9.7085774076082654e+00,
+      "cpu_time": 9.7053531089344833e+00,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/1/2",
+      "iterations": 74727246,
+      "real_time": 1.0385020397774865e+01,
+      "cpu_time": 1.0330810264304402e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/1/3",
+      "iterations": 58161273,
+      "real_time": 1.1918587820938697e+01,
+      "cpu_time": 1.1860538196954527e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/1/4",
+      "iterations": 48633401,
+      "real_time": 1.3997796658213307e+01,
+      "cpu_time": 1.3981090896768663e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/1/8",
+      "iterations": 32240533,
+      "real_time": 2.1278021890062710e+01,
+      "cpu_time": 2.1243600408219077e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/1/12",
+      "iterations": 25863853,
+      "real_time": 2.6210347317374435e+01,
+      "cpu_time": 2.6019054469571898e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/1/15",
+      "iterations": 18352905,
+      "real_time": 3.6613193819894164e+01,
+      "cpu_time": 3.6547020757749202e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/2/1",
+      "iterations": 73026206,
+      "real_time": 9.8186158953423952e+00,
+      "cpu_time": 9.8166677315811768e+00,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/2/2",
+      "iterations": 58211574,
+      "real_time": 1.2254592290921693e+01,
+      "cpu_time": 1.2253937679128907e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/2/3",
+      "iterations": 41788051,
+      "real_time": 1.6580228591773523e+01,
+      "cpu_time": 1.6553320469528451e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/2/4",
+      "iterations": 37355846,
+      "real_time": 1.8618565967193987e+01,
+      "cpu_time": 1.8617300221229108e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/2/8",
+      "iterations": 23951522,
+      "real_time": 2.9064576941597569e+01,
+      "cpu_time": 2.9063497509678122e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/2/12",
+      "iterations": 17955394,
+      "real_time": 3.9849777902603556e+01,
+      "cpu_time": 3.9844906772861613e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/2/15",
+      "iterations": 15072693,
+      "real_time": 4.7936877638126703e+01,
+      "cpu_time": 4.7922026939711373e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/3/1",
+      "iterations": 70635009,
+      "real_time": 1.0202552222711139e+01,
+      "cpu_time": 1.0198639600937826e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/3/2",
+      "iterations": 49235444,
+      "real_time": 1.5070878878523310e+01,
+      "cpu_time": 1.5068494152302112e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/3/3",
+      "iterations": 38174808,
+      "real_time": 1.7475222821439619e+01,
+      "cpu_time": 1.7473879632872062e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/3/4",
+      "iterations": 34106744,
+      "real_time": 2.0029404214340389e+01,
+      "cpu_time": 2.0028736838673233e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/3/8",
+      "iterations": 20587933,
+      "real_time": 3.3617301212484371e+01,
+      "cpu_time": 3.3614787846842148e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/3/12",
+      "iterations": 15313500,
+      "real_time": 4.6273360696817100e+01,
+      "cpu_time": 4.6255069056714703e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/3/15",
+      "iterations": 11989586,
+      "real_time": 5.6997383313299295e+01,
+      "cpu_time": 5.6992209739351992e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/4/1",
+      "iterations": 62076549,
+      "real_time": 1.1642173391475032e+01,
+      "cpu_time": 1.1573790933513360e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/4/2",
+      "iterations": 43844265,
+      "real_time": 1.6274509083373967e+01,
+      "cpu_time": 1.6270428983129257e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/4/3",
+      "iterations": 32460306,
+      "real_time": 2.0151312527943880e+01,
+      "cpu_time": 2.0150641833136220e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/4/4",
+      "iterations": 30627603,
+      "real_time": 2.3542954928378649e+01,
+      "cpu_time": 2.3537787139267902e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/4/8",
+      "iterations": 16957323,
+      "real_time": 3.8976102596893007e+01,
+      "cpu_time": 3.8964463907422207e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/4/12",
+      "iterations": 11970314,
+      "real_time": 5.6122851917381425e+01,
+      "cpu_time": 5.6107550729245602e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/4/15",
+      "iterations": 9639749,
+      "real_time": 7.5149670604821367e+01,
+      "cpu_time": 7.5115545020933837e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/8/1",
+      "iterations": 46089625,
+      "real_time": 1.4720880177102261e+01,
+      "cpu_time": 1.4720059015450884e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/8/2",
+      "iterations": 35845047,
+      "real_time": 2.0075989328449982e+01,
+      "cpu_time": 2.0073735710264213e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/8/3",
+      "iterations": 27662955,
+      "real_time": 2.5734411745732761e+01,
+      "cpu_time": 2.5734018654189445e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/8/4",
+      "iterations": 21879855,
+      "real_time": 3.4106283108315687e+01,
+      "cpu_time": 3.4089714031468731e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/8/8",
+      "iterations": 10406446,
+      "real_time": 5.9840372689427156e+01,
+      "cpu_time": 5.9830512741813799e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/8/12",
+      "iterations": 8903134,
+      "real_time": 8.5126016525115261e+01,
+      "cpu_time": 8.5104750754060177e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/8/15",
+      "iterations": 6940176,
+      "real_time": 1.0028962911611278e+02,
+      "cpu_time": 1.0025855252085725e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/12/1",
+      "iterations": 35671516,
+      "real_time": 1.8399173111134672e+01,
+      "cpu_time": 1.8397255670322537e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/12/2",
+      "iterations": 25982607,
+      "real_time": 2.7018409545851057e+01,
+      "cpu_time": 2.7011839112218500e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/12/3",
+      "iterations": 18737168,
+      "real_time": 3.3898883702445445e+01,
+      "cpu_time": 3.3861787437674536e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/12/4",
+      "iterations": 15761399,
+      "real_time": 4.5105465061861274e+01,
+      "cpu_time": 4.5099042286791622e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/12/8",
+      "iterations": 9303562,
+      "real_time": 7.9825693855906351e+01,
+      "cpu_time": 7.9811151900745131e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/12/12",
+      "iterations": 5956180,
+      "real_time": 1.2225934256378150e+02,
+      "cpu_time": 1.2222196105557559e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/12/15",
+      "iterations": 4506302,
+      "real_time": 1.4818435404415453e+02,
+      "cpu_time": 1.4815784650030173e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/15/1",
+      "iterations": 35319999,
+      "real_time": 1.9630753360924967e+01,
+      "cpu_time": 1.9626416184213337e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/15/2",
+      "iterations": 23526644,
+      "real_time": 3.0066773439214494e+01,
+      "cpu_time": 3.0055710453220922e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/15/3",
+      "iterations": 17598729,
+      "real_time": 4.1083973283990723e+01,
+      "cpu_time": 4.1070806874746893e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/15/4",
+      "iterations": 12271659,
+      "real_time": 5.3032831420852794e+01,
+      "cpu_time": 5.3028526949779170e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/15/8",
+      "iterations": 7952738,
+      "real_time": 9.2429693026570703e+01,
+      "cpu_time": 9.2422131849433242e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/15/12",
+      "iterations": 4950880,
+      "real_time": 1.2875667093472620e+02,
+      "cpu_time": 1.2874357689946041e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/1/15/15",
+      "iterations": 3689648,
+      "real_time": 1.7201106256625351e+02,
+      "cpu_time": 1.7199635303963902e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/1/1",
+      "iterations": 71110750,
+      "real_time": 1.0465632580976097e+01,
+      "cpu_time": 1.0462187503295935e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/1/2",
+      "iterations": 38512960,
+      "real_time": 1.5239588699711753e+01,
+      "cpu_time": 1.5233105946673353e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/1/3",
+      "iterations": 39388909,
+      "real_time": 1.8149611024686859e+01,
+      "cpu_time": 1.8142772118923137e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/1/4",
+      "iterations": 32544668,
+      "real_time": 2.0374460451575882e+01,
+      "cpu_time": 2.0370863823222830e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/1/8",
+      "iterations": 22523030,
+      "real_time": 3.1021126554300547e+01,
+      "cpu_time": 3.1018428692764868e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/1/12",
+      "iterations": 16353383,
+      "real_time": 4.2191290576420187e+01,
+      "cpu_time": 4.2190903252250827e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/1/15",
+      "iterations": 13760296,
+      "real_time": 6.3105678174993102e+01,
+      "cpu_time": 6.3084180747274651e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/2/1",
+      "iterations": 53321552,
+      "real_time": 1.3571924725902662e+01,
+      "cpu_time": 1.3570760280946285e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/2/2",
+      "iterations": 34553227,
+      "real_time": 1.9240029591502111e+01,
+      "cpu_time": 1.9238347839407254e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/2/3",
+      "iterations": 26606966,
+      "real_time": 2.5684070292204609e+01,
+      "cpu_time": 2.5683123735340747e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/2/4",
+      "iterations": 21813649,
+      "real_time": 3.3015750690106444e+01,
+      "cpu_time": 3.3012312612163001e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/2/8",
+      "iterations": 13165814,
+      "real_time": 5.3961429801958381e+01,
+      "cpu_time": 5.3950329239043157e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/2/12",
+      "iterations": 9855407,
+      "real_time": 7.7330575997830607e+01,
+      "cpu_time": 7.7299699545640294e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/2/15",
+      "iterations": 7160320,
+      "real_time": 9.3059447338051214e+01,
+      "cpu_time": 9.3031456694673452e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/3/1",
+      "iterations": 44724434,
+      "real_time": 1.5451907116408652e+01,
+      "cpu_time": 1.5450704194490172e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/3/2",
+      "iterations": 31958363,
+      "real_time": 2.2135768501125039e+01,
+      "cpu_time": 2.2130576588043589e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/3/3",
+      "iterations": 22712598,
+      "real_time": 2.8762911975815388e+01,
+      "cpu_time": 2.8760734461112644e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/3/4",
+      "iterations": 19248749,
+      "real_time": 3.3782859186998586e+01,
+      "cpu_time": 3.3780377103987405e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/3/8",
+      "iterations": 11206634,
+      "real_time": 6.0281978239576361e+01,
+      "cpu_time": 6.0263233366950296e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/3/12",
+      "iterations": 7864550,
+      "real_time": 8.5909877865634058e+01,
+      "cpu_time": 8.5903198530112036e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/3/15",
+      "iterations": 6630295,
+      "real_time": 1.0815029754593648e+02,
+      "cpu_time": 1.0812912547631571e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/4/1",
+      "iterations": 40941658,
+      "real_time": 1.5922779899567644e+01,
+      "cpu_time": 1.5922657553340848e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/4/2",
+      "iterations": 27517887,
+      "real_time": 2.4661230893842301e+01,
+      "cpu_time": 2.4659415165124962e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/4/3",
+      "iterations": 21047188,
+      "real_time": 3.2587989757196986e+01,
+      "cpu_time": 3.2584495372968519e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/4/4",
+      "iterations": 17532786,
+      "real_time": 4.0907714269629892e+01,
+      "cpu_time": 4.0893672003981131e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/4/8",
+      "iterations": 10142723,
+      "real_time": 7.0529643763447154e+01,
+      "cpu_time": 7.0525045394614580e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/4/12",
+      "iterations": 7004763,
+      "real_time": 1.0097909736219086e+02,
+      "cpu_time": 1.0097914804540859e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/4/15",
+      "iterations": 4970108,
+      "real_time": 1.3961298447974306e+02,
+      "cpu_time": 1.3959455207009790e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/8/1",
+      "iterations": 29905201,
+      "real_time": 2.1529462050555786e+01,
+      "cpu_time": 2.1529265093385952e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/8/2",
+      "iterations": 21023483,
+      "real_time": 3.3446591554646147e+01,
+      "cpu_time": 3.3438988201907023e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/8/3",
+      "iterations": 15687962,
+      "real_time": 4.4666488545947821e+01,
+      "cpu_time": 4.4652390157497358e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/8/4",
+      "iterations": 12333715,
+      "real_time": 5.8456414226244306e+01,
+      "cpu_time": 5.8452218167843370e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/8/8",
+      "iterations": 6708579,
+      "real_time": 1.0254277962264361e+02,
+      "cpu_time": 1.0254228205406758e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/8/12",
+      "iterations": 4610116,
+      "real_time": 1.5292074472764102e+02,
+      "cpu_time": 1.5290352780710978e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/8/15",
+      "iterations": 3740855,
+      "real_time": 1.8836253878858312e+02,
+      "cpu_time": 1.8835346464912590e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/12/1",
+      "iterations": 26222827,
+      "real_time": 2.6458015643267686e+01,
+      "cpu_time": 2.6457444881896041e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/12/2",
+      "iterations": 16369904,
+      "real_time": 4.2282200980283513e+01,
+      "cpu_time": 4.2270925962668763e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/12/3",
+      "iterations": 12297530,
+      "real_time": 5.6847899220390829e+01,
+      "cpu_time": 5.6844911132561492e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/12/4",
+      "iterations": 9635768,
+      "real_time": 7.4954440888332272e+01,
+      "cpu_time": 7.4952821612143552e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/12/8",
+      "iterations": 4942805,
+      "real_time": 1.4190911294302057e+02,
+      "cpu_time": 1.4189574543199674e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/12/12",
+      "iterations": 2953823,
+      "real_time": 2.4287571296928039e+02,
+      "cpu_time": 2.4285578384351490e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/12/15",
+      "iterations": 2647384,
+      "real_time": 2.6651603998259009e+02,
+      "cpu_time": 2.6651366027747002e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/15/1",
+      "iterations": 24894908,
+      "real_time": 2.8931135794891475e+01,
+      "cpu_time": 2.8924790563596211e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/15/2",
+      "iterations": 10000000,
+      "real_time": 5.0149341800715774e+01,
+      "cpu_time": 5.0140900000000954e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/15/3",
+      "iterations": 10387914,
+      "real_time": 6.7789764718536929e+01,
+      "cpu_time": 6.7788874647981260e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/15/4",
+      "iterations": 8232001,
+      "real_time": 8.7254512836870219e+01,
+      "cpu_time": 8.7242457817970333e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/15/8",
+      "iterations": 4078969,
+      "real_time": 1.6140622300919441e+02,
+      "cpu_time": 1.6140279565743089e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/15/12",
+      "iterations": 2913983,
+      "real_time": 2.4624524953984053e+02,
+      "cpu_time": 2.4622140897871080e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/2/15/15",
+      "iterations": 1980545,
+      "real_time": 3.5652687670272678e+02,
+      "cpu_time": 3.5642613523045270e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/1/1",
+      "iterations": 56068628,
+      "real_time": 1.2794681797167486e+01,
+      "cpu_time": 1.2793286113582157e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/1/2",
+      "iterations": 39670171,
+      "real_time": 1.7598980981713570e+01,
+      "cpu_time": 1.7598336039438660e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/1/3",
+      "iterations": 31158472,
+      "real_time": 2.2851131887169714e+01,
+      "cpu_time": 2.2848938163591246e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/1/4",
+      "iterations": 26125739,
+      "real_time": 2.5647778157089107e+01,
+      "cpu_time": 2.5647810383468983e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/1/8",
+      "iterations": 16180744,
+      "real_time": 4.4158160894780949e+01,
+      "cpu_time": 4.4148896985206250e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/1/12",
+      "iterations": 11614209,
+      "real_time": 6.1220418967741509e+01,
+      "cpu_time": 6.1203737594182591e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/1/15",
+      "iterations": 7775445,
+      "real_time": 8.8192231440357872e+01,
+      "cpu_time": 8.8191351105949565e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/2/1",
+      "iterations": 42433758,
+      "real_time": 1.6789013218782308e+01,
+      "cpu_time": 1.6788143062888754e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/2/2",
+      "iterations": 27240110,
+      "real_time": 2.6047708914648279e+01,
+      "cpu_time": 2.6045709800731100e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/2/3",
+      "iterations": 21036625,
+      "real_time": 3.4483314841215552e+01,
+      "cpu_time": 3.4475159394627752e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/2/4",
+      "iterations": 16349143,
+      "real_time": 4.0972523638050760e+01,
+      "cpu_time": 4.0972545166434649e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/2/8",
+      "iterations": 9121829,
+      "real_time": 7.3690651728074570e+01,
+      "cpu_time": 7.3678206421101962e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/2/12",
+      "iterations": 6573758,
+      "real_time": 1.0611354388329170e+02,
+      "cpu_time": 1.0610658317510315e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/2/15",
+      "iterations": 5281106,
+      "real_time": 1.2952993671592768e+02,
+      "cpu_time": 1.2952267952963081e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/3/1",
+      "iterations": 37443767,
+      "real_time": 1.8692536893444267e+01,
+      "cpu_time": 1.8691949450491798e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/3/2",
+      "iterations": 24253512,
+      "real_time": 2.8977756462325814e+01,
+      "cpu_time": 2.8977040520977290e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/3/3",
+      "iterations": 18031942,
+      "real_time": 3.8052576424921078e+01,
+      "cpu_time": 3.8050310942659422e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/3/4",
+      "iterations": 14793400,
+      "real_time": 4.8389559534583100e+01,
+      "cpu_time": 4.8378871658984025e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/3/8",
+      "iterations": 7509118,
+      "real_time": 8.7285071553661098e+01,
+      "cpu_time": 8.7281622155892137e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/3/12",
+      "iterations": 5617437,
+      "real_time": 1.2594982336650604e+02,
+      "cpu_time": 1.2594996615004284e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/3/15",
+      "iterations": 4235468,
+      "real_time": 1.5662988364866754e+02,
+      "cpu_time": 1.5658930725010819e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/4/1",
+      "iterations": 33582967,
+      "real_time": 2.0666642557179625e+01,
+      "cpu_time": 2.0664940057261749e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/4/2",
+      "iterations": 20330280,
+      "real_time": 3.4482593843841727e+01,
+      "cpu_time": 3.4467995521949824e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/4/3",
+      "iterations": 14817261,
+      "real_time": 4.4999856386574585e+01,
+      "cpu_time": 4.4999882231946614e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/4/4",
+      "iterations": 12703713,
+      "real_time": 5.7508433168632642e+01,
+      "cpu_time": 5.7498071626775889e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/4/8",
+      "iterations": 6803250,
+      "real_time": 1.0353664954199472e+02,
+      "cpu_time": 1.0353022452504064e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/4/12",
+      "iterations": 4761484,
+      "real_time": 1.5105595102614049e+02,
+      "cpu_time": 1.5101636380590460e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/4/15",
+      "iterations": 3477121,
+      "real_time": 2.0255288986918265e+02,
+      "cpu_time": 2.0255291662268490e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/8/1",
+      "iterations": 24105845,
+      "real_time": 3.0166071218608963e+01,
+      "cpu_time": 3.0152521100172752e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/8/2",
+      "iterations": 14274178,
+      "real_time": 4.7932456426425794e+01,
+      "cpu_time": 4.7932427352385140e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/8/3",
+      "iterations": 11208070,
+      "real_time": 6.4670979126310669e+01,
+      "cpu_time": 6.4656626876885724e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/8/4",
+      "iterations": 8661544,
+      "real_time": 8.1036356563132159e+01,
+      "cpu_time": 8.1029779448099646e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/8/8",
+      "iterations": 4408532,
+      "real_time": 1.5933889874913777e+02,
+      "cpu_time": 1.5933898177443112e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/8/12",
+      "iterations": 3025836,
+      "real_time": 2.2925236332784999e+02,
+      "cpu_time": 2.2921764431383914e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/8/15",
+      "iterations": 2491999,
+      "real_time": 2.8641806799325639e+02,
+      "cpu_time": 2.8639016307791479e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/12/1",
+      "iterations": 19248855,
+      "real_time": 3.5610818100409780e+01,
+      "cpu_time": 3.5608871280915714e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/12/2",
+      "iterations": 12091481,
+      "real_time": 5.8460851325769724e+01,
+      "cpu_time": 5.8460084418112018e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/12/3",
+      "iterations": 8702571,
+      "real_time": 8.2073278220417663e+01,
+      "cpu_time": 8.2066437607921230e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/12/4",
+      "iterations": 6588049,
+      "real_time": 1.0495601975858659e+02,
+      "cpu_time": 1.0495353024848519e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/12/8",
+      "iterations": 3441545,
+      "real_time": 2.0682049776183950e+02,
+      "cpu_time": 2.0680363034625137e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/12/12",
+      "iterations": 1913520,
+      "real_time": 3.5206489242527891e+02,
+      "cpu_time": 3.5197646222668027e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/12/15",
+      "iterations": 1769187,
+      "real_time": 3.9255757818155735e+02,
+      "cpu_time": 3.9251644964607959e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/15/1",
+      "iterations": 17529493,
+      "real_time": 4.0357623289387469e+01,
+      "cpu_time": 4.0355017683627686e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/15/2",
+      "iterations": 10120287,
+      "real_time": 6.8401365789683695e+01,
+      "cpu_time": 6.8394305418413964e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/15/3",
+      "iterations": 7331839,
+      "real_time": 9.5766700952043223e+01,
+      "cpu_time": 9.5763832239086980e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/15/4",
+      "iterations": 5777437,
+      "real_time": 1.2417593458644886e+02,
+      "cpu_time": 1.2417166989445052e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/15/8",
+      "iterations": 2843390,
+      "real_time": 2.4545334972027791e+02,
+      "cpu_time": 2.4543344388213094e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/15/12",
+      "iterations": 2003899,
+      "real_time": 3.4996039020923655e+02,
+      "cpu_time": 3.4996075151492676e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/3/15/15",
+      "iterations": 1369917,
+      "real_time": 5.3407925510682742e+02,
+      "cpu_time": 5.3400388490691137e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/1/1",
+      "iterations": 43784206,
+      "real_time": 1.5567154102293838e+01,
+      "cpu_time": 1.5566777664073646e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/1/2",
+      "iterations": 33442579,
+      "real_time": 2.1867037525565301e+01,
+      "cpu_time": 2.1860634611941059e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/1/3",
+      "iterations": 26232949,
+      "real_time": 2.7432051955857808e+01,
+      "cpu_time": 2.7423260724518492e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/1/4",
+      "iterations": 21726783,
+      "real_time": 3.3097775082474257e+01,
+      "cpu_time": 3.3093072269374041e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/1/8",
+      "iterations": 12635379,
+      "real_time": 5.5527680649909243e+01,
+      "cpu_time": 5.5527736841135230e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/1/12",
+      "iterations": 7856077,
+      "real_time": 8.8061040521836091e+01,
+      "cpu_time": 8.8058200040553615e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/1/15",
+      "iterations": 5300460,
+      "real_time": 1.2661529226403675e+02,
+      "cpu_time": 1.2660165344139702e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/2/1",
+      "iterations": 34390768,
+      "real_time": 2.0377612358162786e+01,
+      "cpu_time": 2.0377561792164304e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/2/2",
+      "iterations": 20908879,
+      "real_time": 3.4186949283789282e+01,
+      "cpu_time": 3.4171846324233805e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/2/3",
+      "iterations": 15411815,
+      "real_time": 4.5325916440705925e+01,
+      "cpu_time": 4.5313287240988785e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/2/4",
+      "iterations": 12438032,
+      "real_time": 5.4548608170088684e+01,
+      "cpu_time": 5.4541345447574550e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/2/8",
+      "iterations": 7252233,
+      "real_time": 9.5601186552427208e+01,
+      "cpu_time": 9.5590282330976379e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/2/12",
+      "iterations": 4715647,
+      "real_time": 1.4106606686984432e+02,
+      "cpu_time": 1.4105275479695544e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/2/15",
+      "iterations": 4108608,
+      "real_time": 1.7176933988395049e+02,
+      "cpu_time": 1.7176571724535870e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/3/1",
+      "iterations": 30653086,
+      "real_time": 2.3378084249479144e+01,
+      "cpu_time": 2.3371578313517723e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/3/2",
+      "iterations": 18987479,
+      "real_time": 3.6601283509988470e+01,
+      "cpu_time": 3.6600540809024764e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/3/3",
+      "iterations": 14597262,
+      "real_time": 4.8923792148898187e+01,
+      "cpu_time": 4.8915611708551786e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/3/4",
+      "iterations": 11609779,
+      "real_time": 6.1787660821110308e+01,
+      "cpu_time": 6.1783949548049065e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/3/8",
+      "iterations": 6170445,
+      "real_time": 1.1533432271722650e+02,
+      "cpu_time": 1.1532134230189035e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/3/12",
+      "iterations": 3942551,
+      "real_time": 1.7028375181379528e+02,
+      "cpu_time": 1.7027756901559130e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/3/15",
+      "iterations": 3143680,
+      "real_time": 2.1163193262032627e+02,
+      "cpu_time": 2.1153902432817358e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/4/1",
+      "iterations": 26297796,
+      "real_time": 2.7622421285563462e+01,
+      "cpu_time": 2.7617903796958281e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/4/2",
+      "iterations": 16116963,
+      "real_time": 4.2681934678112448e+01,
+      "cpu_time": 4.2673548360196982e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/4/3",
+      "iterations": 11333463,
+      "real_time": 5.9496259266019280e+01,
+      "cpu_time": 5.9488260560782976e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/4/4",
+      "iterations": 9425832,
+      "real_time": 7.2428386902398671e+01,
+      "cpu_time": 7.2426816009451187e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/4/8",
+      "iterations": 5303754,
+      "real_time": 1.3631801739144950e+02,
+      "cpu_time": 1.3630609564470390e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/4/12",
+      "iterations": 3471792,
+      "real_time": 2.0895634672966241e+02,
+      "cpu_time": 2.0888780203422081e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/4/15",
+      "iterations": 2520089,
+      "real_time": 2.8573527243793194e+02,
+      "cpu_time": 2.8568475160995524e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/8/1",
+      "iterations": 19096621,
+      "real_time": 3.6688983777001837e+01,
+      "cpu_time": 3.6686385512912835e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/8/2",
+      "iterations": 11715481,
+      "real_time": 5.9865703340908404e+01,
+      "cpu_time": 5.9863867305149718e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/8/3",
+      "iterations": 8174992,
+      "real_time": 8.2275007966704990e+01,
+      "cpu_time": 8.2264056038215699e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/8/4",
+      "iterations": 6602839,
+      "real_time": 1.0513826369751193e+02,
+      "cpu_time": 1.0513825946687413e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/8/8",
+      "iterations": 3103153,
+      "real_time": 2.2933769686453937e+02,
+      "cpu_time": 2.2922105355424063e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/8/12",
+      "iterations": 2360909,
+      "real_time": 3.0008286085974277e+02,
+      "cpu_time": 3.0007382749609508e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/8/15",
+      "iterations": 1889522,
+      "real_time": 3.6764128913401316e+02,
+      "cpu_time": 3.6759773106638625e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/12/1",
+      "iterations": 15594924,
+      "real_time": 4.5340802243594723e+01,
+      "cpu_time": 4.5338855130040670e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/12/2",
+      "iterations": 9416323,
+      "real_time": 7.7935394432093759e+01,
+      "cpu_time": 7.7923197834230493e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/12/3",
+      "iterations": 6504911,
+      "real_time": 1.0715989640776804e+02,
+      "cpu_time": 1.0713782248519193e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/12/4",
+      "iterations": 5121452,
+      "real_time": 1.4158639932237980e+02,
+      "cpu_time": 1.4156454068104333e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/12/8",
+      "iterations": 2629809,
+      "real_time": 2.6729956094762065e+02,
+      "cpu_time": 2.6729964039213291e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/12/12",
+      "iterations": 1465266,
+      "real_time": 4.8055055047316051e+02,
+      "cpu_time": 4.8045406090090495e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/12/15",
+      "iterations": 1347579,
+      "real_time": 5.0750709527862261e+02,
+      "cpu_time": 5.0747525748026823e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/15/1",
+      "iterations": 13992164,
+      "real_time": 5.1035651877709000e+01,
+      "cpu_time": 5.1031848969179904e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/15/2",
+      "iterations": 8114437,
+      "real_time": 9.1338025169208450e+01,
+      "cpu_time": 9.1289266279351281e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/15/3",
+      "iterations": 5584453,
+      "real_time": 1.3020937251867338e+02,
+      "cpu_time": 1.3020093463048300e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/15/4",
+      "iterations": 4225191,
+      "real_time": 1.6777847509516360e+02,
+      "cpu_time": 1.6776780031956187e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/15/8",
+      "iterations": 2208097,
+      "real_time": 3.1420101474920136e+02,
+      "cpu_time": 3.1417641525712475e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/15/12",
+      "iterations": 1362239,
+      "real_time": 4.7193459299910199e+02,
+      "cpu_time": 4.7192966872919396e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/4/15/15",
+      "iterations": 1013934,
+      "real_time": 6.9074462639740500e+02,
+      "cpu_time": 6.9067611895843174e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/1/1",
+      "iterations": 35883083,
+      "real_time": 1.9658060568729933e+01,
+      "cpu_time": 1.9653244399317629e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/1/2",
+      "iterations": 24056388,
+      "real_time": 2.9539796622717901e+01,
+      "cpu_time": 2.9535938645485025e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/1/3",
+      "iterations": 18012176,
+      "real_time": 3.7624106551207404e+01,
+      "cpu_time": 3.7622994578778332e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/1/4",
+      "iterations": 14678345,
+      "real_time": 4.5918100918078160e+01,
+      "cpu_time": 4.5913827478505929e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/1/8",
+      "iterations": 7894084,
+      "real_time": 8.9416150485226595e+01,
+      "cpu_time": 8.9416200790360904e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/1/12",
+      "iterations": 5637614,
+      "real_time": 1.2666981333290803e+02,
+      "cpu_time": 1.2665606407249378e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/1/15",
+      "iterations": 3823507,
+      "real_time": 1.8736620963154931e+02,
+      "cpu_time": 1.8730395942781817e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/2/1",
+      "iterations": 24223046,
+      "real_time": 2.8497691000545316e+01,
+      "cpu_time": 2.8491338372555116e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/2/2",
+      "iterations": 15398423,
+      "real_time": 4.5795887799865483e+01,
+      "cpu_time": 4.5794819378582773e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/2/3",
+      "iterations": 11563176,
+      "real_time": 6.2808566696334694e+01,
+      "cpu_time": 6.2802987691271042e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/2/4",
+      "iterations": 9189970,
+      "real_time": 7.9181958046114659e+01,
+      "cpu_time": 7.9176972286086510e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/2/8",
+      "iterations": 5045518,
+      "real_time": 1.4116900465724606e+02,
+      "cpu_time": 1.4116370212137321e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/2/12",
+      "iterations": 3222317,
+      "real_time": 2.1623090867295952e+02,
+      "cpu_time": 2.1613391854370431e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/2/15",
+      "iterations": 2661132,
+      "real_time": 2.5762627518753141e+02,
+      "cpu_time": 2.5762607792473091e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/3/1",
+      "iterations": 21216879,
+      "real_time": 3.1723128644065358e+01,
+      "cpu_time": 3.1721536423899945e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/3/2",
+      "iterations": 12753475,
+      "real_time": 5.0896364085014220e+01,
+      "cpu_time": 5.0893109525054015e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/3/3",
+      "iterations": 10091108,
+      "real_time": 7.2624663025969213e+01,
+      "cpu_time": 7.2617595609916108e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/3/4",
+      "iterations": 7049416,
+      "real_time": 9.1072403728374738e+01,
+      "cpu_time": 9.1064848492413887e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/3/8",
+      "iterations": 4191818,
+      "real_time": 1.7144981293848105e+02,
+      "cpu_time": 1.7140486538298720e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/3/12",
+      "iterations": 2730067,
+      "real_time": 2.5822554940052413e+02,
+      "cpu_time": 2.5821783860981856e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/3/15",
+      "iterations": 2247191,
+      "real_time": 3.2026805378443191e+02,
+      "cpu_time": 3.2021443660107224e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/4/1",
+      "iterations": 18121429,
+      "real_time": 3.8547122245533529e+01,
+      "cpu_time": 3.8529798063938813e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/4/2",
+      "iterations": 11586335,
+      "real_time": 6.0852216685201718e+01,
+      "cpu_time": 6.0840377910704561e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/4/3",
+      "iterations": 7775445,
+      "real_time": 8.3481185069736838e+01,
+      "cpu_time": 8.3473421778429085e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/4/4",
+      "iterations": 5806575,
+      "real_time": 1.0983559688561705e+02,
+      "cpu_time": 1.0980741659239386e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/4/8",
+      "iterations": 3438553,
+      "real_time": 2.1037594070842158e+02,
+      "cpu_time": 2.1031026713853169e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/4/12",
+      "iterations": 2353234,
+      "real_time": 3.0354535504545981e+02,
+      "cpu_time": 3.0353844963994919e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/4/15",
+      "iterations": 1707234,
+      "real_time": 4.1212674833869283e+02,
+      "cpu_time": 4.1206594995178187e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/8/1",
+      "iterations": 13362349,
+      "real_time": 5.2982092895293206e+01,
+      "cpu_time": 5.2980205800642366e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/8/2",
+      "iterations": 8183210,
+      "real_time": 8.6807440359333526e+01,
+      "cpu_time": 8.6797234825940336e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/8/3",
+      "iterations": 5575956,
+      "real_time": 1.2095788793507765e+02,
+      "cpu_time": 1.2092742482186435e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/8/4",
+      "iterations": 4406784,
+      "real_time": 1.6091419163453321e+02,
+      "cpu_time": 1.6090282618798929e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/8/8",
+      "iterations": 2054986,
+      "real_time": 3.2413642234595721e+02,
+      "cpu_time": 3.2408687942400189e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/8/12",
+      "iterations": 1551202,
+      "real_time": 4.4303657167700601e+02,
+      "cpu_time": 4.4300548864687693e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/8/15",
+      "iterations": 1191347,
+      "real_time": 5.5736654808529727e+02,
+      "cpu_time": 5.5727424503526106e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/12/1",
+      "iterations": 10931863,
+      "real_time": 6.4586304002781674e+01,
+      "cpu_time": 6.4583227945685735e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/12/2",
+      "iterations": 6480881,
+      "real_time": 1.1095950659776847e+02,
+      "cpu_time": 1.1091346994336570e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/12/3",
+      "iterations": 4402294,
+      "real_time": 1.6057101819994259e+02,
+      "cpu_time": 1.6057105681719801e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/12/4",
+      "iterations": 3377482,
+      "real_time": 2.1260607102163365e+02,
+      "cpu_time": 2.1259239871595997e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/12/8",
+      "iterations": 1774438,
+      "real_time": 4.0620272672379355e+02,
+      "cpu_time": 4.0617254589902825e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/12/12",
+      "iterations": 1019947,
+      "real_time": 6.9195249654498321e+02,
+      "cpu_time": 6.9188300960735296e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/12/15",
+      "iterations": 884553,
+      "real_time": 7.5831573007795248e+02,
+      "cpu_time": 7.5825982162740524e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/15/1",
+      "iterations": 9880168,
+      "real_time": 7.0872998011871360e+01,
+      "cpu_time": 7.0861244464668601e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/15/2",
+      "iterations": 5605516,
+      "real_time": 1.3012630738160823e+02,
+      "cpu_time": 1.3010452561369684e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/15/3",
+      "iterations": 3594887,
+      "real_time": 1.8727335103647246e+02,
+      "cpu_time": 1.8727320218965750e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/15/4",
+      "iterations": 2914275,
+      "real_time": 2.5004251521486941e+02,
+      "cpu_time": 2.5001964468006324e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/15/8",
+      "iterations": 1496673,
+      "real_time": 4.5904399752450018e+02,
+      "cpu_time": 4.5904415994676094e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/15/12",
+      "iterations": 1019264,
+      "real_time": 6.9263589708023426e+02,
+      "cpu_time": 6.9257523075474012e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/6/15/15",
+      "iterations": 703822,
+      "real_time": 1.0274121383928141e+03,
+      "cpu_time": 1.0269911994794356e+03,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/1/1",
+      "iterations": 28848967,
+      "real_time": 2.4540278929744005e+01,
+      "cpu_time": 2.4540116115768196e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/1/2",
+      "iterations": 16448953,
+      "real_time": 4.1712574414326596e+01,
+      "cpu_time": 4.1707396209352183e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/1/3",
+      "iterations": 14226312,
+      "real_time": 4.9027181393759875e+01,
+      "cpu_time": 4.9021137734081250e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/1/4",
+      "iterations": 11454941,
+      "real_time": 5.9979401986663639e+01,
+      "cpu_time": 5.9975865436584797e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/1/8",
+      "iterations": 6124502,
+      "real_time": 1.1666736185728730e+02,
+      "cpu_time": 1.1663968760235049e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/1/12",
+      "iterations": 3998583,
+      "real_time": 1.6762970205238750e+02,
+      "cpu_time": 1.6757736428129780e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/1/15",
+      "iterations": 2814308,
+      "real_time": 2.4954055349218976e+02,
+      "cpu_time": 2.4952741490982092e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/2/1",
+      "iterations": 18724488,
+      "real_time": 3.5947751575164503e+01,
+      "cpu_time": 3.5943626335737079e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/2/2",
+      "iterations": 11921590,
+      "real_time": 5.9207167255952854e+01,
+      "cpu_time": 5.9207203066035085e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/2/3",
+      "iterations": 8754158,
+      "real_time": 8.2119780673261829e+01,
+      "cpu_time": 8.2108410654686921e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/2/4",
+      "iterations": 6998810,
+      "real_time": 1.0214988562003887e+02,
+      "cpu_time": 1.0214522183056609e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/2/8",
+      "iterations": 3592562,
+      "real_time": 1.9437585320576306e+02,
+      "cpu_time": 1.9436908813265629e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/2/12",
+      "iterations": 2396431,
+      "real_time": 2.8026308038860464e+02,
+      "cpu_time": 2.8025509601569246e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/2/15",
+      "iterations": 2046951,
+      "real_time": 3.5115058444827230e+02,
+      "cpu_time": 3.5108852141550892e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/3/1",
+      "iterations": 16325388,
+      "real_time": 3.9796095745111217e+01,
+      "cpu_time": 3.9796113881031708e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/3/2",
+      "iterations": 10737679,
+      "real_time": 6.5906308716831845e+01,
+      "cpu_time": 6.5905024726480917e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/3/3",
+      "iterations": 7088320,
+      "real_time": 9.3004828643787533e+01,
+      "cpu_time": 9.2977461514156531e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/3/4",
+      "iterations": 5870267,
+      "real_time": 1.2000603959593791e+02,
+      "cpu_time": 1.2000510368608558e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/3/8",
+      "iterations": 3068143,
+      "real_time": 2.3626721927352122e+02,
+      "cpu_time": 2.3623996665083703e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/3/12",
+      "iterations": 2099580,
+      "real_time": 3.3583827954933315e+02,
+      "cpu_time": 3.3581573457548717e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/3/15",
+      "iterations": 1649656,
+      "real_time": 4.2427198941703512e+02,
+      "cpu_time": 4.2424359987778223e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/4/1",
+      "iterations": 15155944,
+      "real_time": 4.6883314822829128e+01,
+      "cpu_time": 4.6868476156946230e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/4/2",
+      "iterations": 8977697,
+      "real_time": 7.8234271655284971e+01,
+      "cpu_time": 7.8221396868264293e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/4/3",
+      "iterations": 6523279,
+      "real_time": 1.0760156249974737e+02,
+      "cpu_time": 1.0759956764075524e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/4/4",
+      "iterations": 4932391,
+      "real_time": 1.4628502665592592e+02,
+      "cpu_time": 1.4624246131338307e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/4/8",
+      "iterations": 2622518,
+      "real_time": 2.7004358482882026e+02,
+      "cpu_time": 2.7002712660123399e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/4/12",
+      "iterations": 1776519,
+      "real_time": 4.0179950733230476e+02,
+      "cpu_time": 4.0170074173144076e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/4/15",
+      "iterations": 1306653,
+      "real_time": 5.3862783928653209e+02,
+      "cpu_time": 5.3852629581072210e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/8/1",
+      "iterations": 10522044,
+      "real_time": 6.7281942654985912e+01,
+      "cpu_time": 6.7279323294977956e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/8/2",
+      "iterations": 5683202,
+      "real_time": 1.1714747144106124e+02,
+      "cpu_time": 1.1712921694495108e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/8/3",
+      "iterations": 4012588,
+      "real_time": 1.7086801410540352e+02,
+      "cpu_time": 1.7082840301570150e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/8/4",
+      "iterations": 3210391,
+      "real_time": 2.1769298412875284e+02,
+      "cpu_time": 2.1764763232889757e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/8/8",
+      "iterations": 1386276,
+      "real_time": 4.2298219331094850e+02,
+      "cpu_time": 4.2296988478487037e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/8/12",
+      "iterations": 1229796,
+      "real_time": 5.8270287912404478e+02,
+      "cpu_time": 5.8268444522507320e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/8/15",
+      "iterations": 913278,
+      "real_time": 7.2295903331698173e+02,
+      "cpu_time": 7.2266932960169106e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/12/1",
+      "iterations": 8666906,
+      "real_time": 8.2825083479215138e+01,
+      "cpu_time": 8.2813290002222786e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/12/2",
+      "iterations": 4787962,
+      "real_time": 1.5893517973517132e+02,
+      "cpu_time": 1.5890017506404547e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/12/3",
+      "iterations": 3376993,
+      "real_time": 2.1209493534458539e+02,
+      "cpu_time": 2.1208572241637498e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/12/4",
+      "iterations": 2528052,
+      "real_time": 2.8361835277337747e+02,
+      "cpu_time": 2.8356220520780306e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/12/8",
+      "iterations": 1365161,
+      "real_time": 5.1563232464065175e+02,
+      "cpu_time": 5.1561244424649635e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/12/12",
+      "iterations": 774816,
+      "real_time": 9.1982087090455320e+02,
+      "cpu_time": 9.1976933878501256e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/12/15",
+      "iterations": 644128,
+      "real_time": 1.0335456059649105e+03,
+      "cpu_time": 1.0334296909931829e+03,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/15/1",
+      "iterations": 6749330,
+      "real_time": 9.3389646673451765e+01,
+      "cpu_time": 9.3385713841225680e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/15/2",
+      "iterations": 4076855,
+      "real_time": 1.7768800584069561e+02,
+      "cpu_time": 1.7766268361275348e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/15/3",
+      "iterations": 2646983,
+      "real_time": 2.4629957616369117e+02,
+      "cpu_time": 2.4629398828781095e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/15/4",
+      "iterations": 2202297,
+      "real_time": 3.2136069111042735e+02,
+      "cpu_time": 3.2135175228409929e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/15/8",
+      "iterations": 1097884,
+      "real_time": 6.1847880284255530e+02,
+      "cpu_time": 6.1845240480779523e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/15/12",
+      "iterations": 782910,
+      "real_time": 9.0918598563572277e+02,
+      "cpu_time": 9.0896271602092418e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixMatrixMultiplyDynamic/8/15/15",
+      "iterations": 527999,
+      "real_time": 1.3869756686825481e+03,
+      "cpu_time": 1.3864761107502002e+03,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/1/1",
+      "iterations": 71473060,
+      "real_time": 1.0218954287241552e+01,
+      "cpu_time": 1.0190986645876869e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/1/2",
+      "iterations": 56615982,
+      "real_time": 1.1715988235202534e+01,
+      "cpu_time": 1.1687053312967441e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/1/3",
+      "iterations": 41964917,
+      "real_time": 1.5662750051657548e+01,
+      "cpu_time": 1.5605130352097801e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/1/4",
+      "iterations": 43787767,
+      "real_time": 1.6259733979111644e+01,
+      "cpu_time": 1.6258513479347108e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/1/6",
+      "iterations": 36258346,
+      "real_time": 1.9466160398137074e+01,
+      "cpu_time": 1.9460291983534194e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/1/8",
+      "iterations": 29992202,
+      "real_time": 2.3460926542272730e+01,
+      "cpu_time": 2.3460664875490462e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/2/1",
+      "iterations": 64820817,
+      "real_time": 1.1306831399178281e+01,
+      "cpu_time": 1.1305241647910986e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/2/2",
+      "iterations": 42461300,
+      "real_time": 1.6090760857123335e+01,
+      "cpu_time": 1.6089709924096166e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/2/3",
+      "iterations": 34222131,
+      "real_time": 2.0463437795226742e+01,
+      "cpu_time": 2.0461291554286287e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/2/4",
+      "iterations": 28672426,
+      "real_time": 2.4036501725380685e+01,
+      "cpu_time": 2.4036508107125368e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/2/6",
+      "iterations": 22107753,
+      "real_time": 3.2366105997698078e+01,
+      "cpu_time": 3.2339243160535105e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/2/8",
+      "iterations": 17977851,
+      "real_time": 3.8303000509294193e+01,
+      "cpu_time": 3.8302408891917274e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/3/1",
+      "iterations": 47772113,
+      "real_time": 1.5043972116588071e+01,
+      "cpu_time": 1.5041369428227512e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/3/2",
+      "iterations": 34204071,
+      "real_time": 2.0490502315173504e+01,
+      "cpu_time": 2.0488379877355843e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/3/3",
+      "iterations": 24970214,
+      "real_time": 2.7615499291093524e+01,
+      "cpu_time": 2.7611537490226628e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/3/4",
+      "iterations": 21303857,
+      "real_time": 3.2186434312846430e+01,
+      "cpu_time": 3.2186425209294711e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/3/6",
+      "iterations": 15983487,
+      "real_time": 4.3950274933558802e+01,
+      "cpu_time": 4.3944290754573188e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/3/8",
+      "iterations": 13054830,
+      "real_time": 5.3519842379947981e+01,
+      "cpu_time": 5.3519042377420412e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/4/1",
+      "iterations": 38605567,
+      "real_time": 1.7140124584280613e+01,
+      "cpu_time": 1.7139237975704773e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/4/2",
+      "iterations": 24963892,
+      "real_time": 2.6611483778803432e+01,
+      "cpu_time": 2.6610874618428802e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/4/3",
+      "iterations": 20679041,
+      "real_time": 3.3410864169128558e+01,
+      "cpu_time": 3.3410833703554680e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/4/4",
+      "iterations": 16227632,
+      "real_time": 4.1827737282765789e+01,
+      "cpu_time": 4.1814603634095711e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/4/6",
+      "iterations": 12500000,
+      "real_time": 5.5144667755812407e+01,
+      "cpu_time": 5.5142799999998715e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/4/8",
+      "iterations": 10199175,
+      "real_time": 7.1093479030740511e+01,
+      "cpu_time": 7.1083690592624933e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/8/1",
+      "iterations": 23249326,
+      "real_time": 2.9231176507753009e+01,
+      "cpu_time": 2.9228202142289398e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/8/2",
+      "iterations": 15252472,
+      "real_time": 4.5119020311992401e+01,
+      "cpu_time": 4.5116489969624652e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/8/3",
+      "iterations": 11697861,
+      "real_time": 5.9494903808970463e+01,
+      "cpu_time": 5.9494979466764093e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/8/4",
+      "iterations": 9670378,
+      "real_time": 7.5506631901856167e+01,
+      "cpu_time": 7.5465302390455705e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/8/6",
+      "iterations": 6613757,
+      "real_time": 1.0650588401303291e+02,
+      "cpu_time": 1.0650482018011422e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/8/8",
+      "iterations": 5224816,
+      "real_time": 1.3476276754677889e+02,
+      "cpu_time": 1.3475019981565049e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/12/1",
+      "iterations": 16780897,
+      "real_time": 3.9535694129434802e+01,
+      "cpu_time": 3.9532570875080800e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/12/2",
+      "iterations": 11076475,
+      "real_time": 6.2615060207266964e+01,
+      "cpu_time": 6.2613782814477034e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/12/3",
+      "iterations": 7716475,
+      "real_time": 9.3239785648289768e+01,
+      "cpu_time": 9.3230523004355803e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/12/4",
+      "iterations": 6209031,
+      "real_time": 1.1262584290792275e+02,
+      "cpu_time": 1.1260195028821359e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/12/6",
+      "iterations": 4598547,
+      "real_time": 1.5699447042687075e+02,
+      "cpu_time": 1.5698610887308206e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/12/8",
+      "iterations": 3632307,
+      "real_time": 1.9546146239308518e+02,
+      "cpu_time": 1.9545126554555083e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/15/1",
+      "iterations": 14862532,
+      "real_time": 4.8933481992353883e+01,
+      "cpu_time": 4.8925445543194087e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/15/2",
+      "iterations": 9272751,
+      "real_time": 7.5060414440919416e+01,
+      "cpu_time": 7.5058307939039238e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/15/3",
+      "iterations": 6480341,
+      "real_time": 1.0979939651246349e+02,
+      "cpu_time": 1.0979591969003172e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/15/4",
+      "iterations": 4876180,
+      "real_time": 1.3706889409129832e+02,
+      "cpu_time": 1.3701832171905659e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/15/6",
+      "iterations": 3760125,
+      "real_time": 1.8651585653560488e+02,
+      "cpu_time": 1.8650789534924448e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/1/15/8",
+      "iterations": 2847137,
+      "real_time": 2.4058243597574796e+02,
+      "cpu_time": 2.4055217574707797e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/1/1",
+      "iterations": 68775115,
+      "real_time": 1.0161083190702918e+01,
+      "cpu_time": 1.0160099332440879e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/1/2",
+      "iterations": 49986789,
+      "real_time": 1.4439146730228050e+01,
+      "cpu_time": 1.4437994806987415e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/1/3",
+      "iterations": 39209975,
+      "real_time": 1.7907087851448971e+01,
+      "cpu_time": 1.7905188666914789e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/1/4",
+      "iterations": 33433954,
+      "real_time": 2.1579039621029189e+01,
+      "cpu_time": 2.1575940434684497e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/1/6",
+      "iterations": 23979419,
+      "real_time": 2.9080755623547759e+01,
+      "cpu_time": 2.9078352565590922e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/1/8",
+      "iterations": 18695882,
+      "real_time": 3.7289310238280216e+01,
+      "cpu_time": 3.7285964898579948e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/2/1",
+      "iterations": 46248885,
+      "real_time": 1.5789034589756211e+01,
+      "cpu_time": 1.5788380627987841e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/2/2",
+      "iterations": 31242050,
+      "real_time": 2.3166515994496951e+01,
+      "cpu_time": 2.3160420010851698e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/2/3",
+      "iterations": 23434728,
+      "real_time": 3.0073514528259938e+01,
+      "cpu_time": 3.0070415154806401e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/2/4",
+      "iterations": 18801991,
+      "real_time": 3.8634371435713341e+01,
+      "cpu_time": 3.8631493866794607e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/2/6",
+      "iterations": 13261846,
+      "real_time": 5.3974149983132079e+01,
+      "cpu_time": 5.3973104498422586e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/2/8",
+      "iterations": 10277341,
+      "real_time": 6.8221806885820996e+01,
+      "cpu_time": 6.8221828973073386e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/3/1",
+      "iterations": 36620838,
+      "real_time": 1.9742956780825988e+01,
+      "cpu_time": 1.9738516087478931e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/3/2",
+      "iterations": 22380520,
+      "real_time": 3.0584310238259096e+01,
+      "cpu_time": 3.0581461020569659e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/3/3",
+      "iterations": 15505215,
+      "real_time": 4.3842810372514549e+01,
+      "cpu_time": 4.3827125260759594e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/3/4",
+      "iterations": 13073116,
+      "real_time": 5.4027797198758762e+01,
+      "cpu_time": 5.4026599320315746e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/3/6",
+      "iterations": 9122780,
+      "real_time": 8.0540206384536461e+01,
+      "cpu_time": 8.0530934649306928e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/3/8",
+      "iterations": 6801796,
+      "real_time": 1.0112268436776978e+02,
+      "cpu_time": 1.0112270347419975e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/4/1",
+      "iterations": 28346967,
+      "real_time": 2.4199674908345415e+01,
+      "cpu_time": 2.4198109095763538e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/4/2",
+      "iterations": 17579106,
+      "real_time": 3.9392355676616084e+01,
+      "cpu_time": 3.9392333148229547e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/4/3",
+      "iterations": 12823096,
+      "real_time": 5.5229829597677757e+01,
+      "cpu_time": 5.5228940031327966e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/4/4",
+      "iterations": 9197335,
+      "real_time": 7.1939196631556840e+01,
+      "cpu_time": 7.1900936521278567e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/4/6",
+      "iterations": 6933302,
+      "real_time": 1.0101546536387012e+02,
+      "cpu_time": 1.0101564882072290e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/4/8",
+      "iterations": 4975301,
+      "real_time": 1.3746609339775961e+02,
+      "cpu_time": 1.3745037737415063e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/8/1",
+      "iterations": 16063741,
+      "real_time": 4.3754604925193078e+01,
+      "cpu_time": 4.3751078904970491e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/8/2",
+      "iterations": 9354662,
+      "real_time": 7.6260448106230101e+01,
+      "cpu_time": 7.6245298868089733e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/8/3",
+      "iterations": 6419133,
+      "real_time": 1.0508850618775932e+02,
+      "cpu_time": 1.0508864670665862e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/8/4",
+      "iterations": 5161975,
+      "real_time": 1.3653278172737677e+02,
+      "cpu_time": 1.3649988618696295e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/8/6",
+      "iterations": 3333905,
+      "real_time": 2.0003050893993625e+02,
+      "cpu_time": 2.0002939495876288e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/8/8",
+      "iterations": 2567385,
+      "real_time": 2.6870413744843034e+02,
+      "cpu_time": 2.6869674785822286e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/12/1",
+      "iterations": 11365666,
+      "real_time": 6.4047146198390067e+01,
+      "cpu_time": 6.4030651613372697e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/12/2",
+      "iterations": 6117651,
+      "real_time": 1.1035232166828166e+02,
+      "cpu_time": 1.1034970775546375e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/12/3",
+      "iterations": 4512461,
+      "real_time": 1.5934719212622446e+02,
+      "cpu_time": 1.5933079532431327e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/12/4",
+      "iterations": 3394713,
+      "real_time": 2.0214799867461599e+02,
+      "cpu_time": 2.0209661317467911e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/12/6",
+      "iterations": 2270648,
+      "real_time": 3.1233783523621946e+02,
+      "cpu_time": 3.1229102881643064e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/12/8",
+      "iterations": 1759811,
+      "real_time": 3.9604278754467276e+02,
+      "cpu_time": 3.9604082483857508e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/15/1",
+      "iterations": 8687343,
+      "real_time": 7.6983039231792091e+01,
+      "cpu_time": 7.6981419980770895e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/15/2",
+      "iterations": 4955963,
+      "real_time": 1.3700803639520927e+02,
+      "cpu_time": 1.3700808500788966e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/15/3",
+      "iterations": 3639010,
+      "real_time": 1.9288314461828989e+02,
+      "cpu_time": 1.9288350402994513e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/15/4",
+      "iterations": 2613725,
+      "real_time": 2.6284468681672615e+02,
+      "cpu_time": 2.6271738610605422e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/15/6",
+      "iterations": 1863660,
+      "real_time": 3.7728085221995713e+02,
+      "cpu_time": 3.7727428822852801e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/2/15/8",
+      "iterations": 1000000,
+      "real_time": 5.0862821191549301e+02,
+      "cpu_time": 5.0845500000002630e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/1/1",
+      "iterations": 64992944,
+      "real_time": 1.0316672268313182e+01,
+      "cpu_time": 1.0316150627058047e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/1/2",
+      "iterations": 42592807,
+      "real_time": 1.6668357031118255e+01,
+      "cpu_time": 1.6667368271831350e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/1/3",
+      "iterations": 35216936,
+      "real_time": 1.9412243503492750e+01,
+      "cpu_time": 1.9411796642388449e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/1/4",
+      "iterations": 29286742,
+      "real_time": 2.3951316948384065e+01,
+      "cpu_time": 2.3944281682133134e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/1/6",
+      "iterations": 20663231,
+      "real_time": 3.2769599775902847e+01,
+      "cpu_time": 3.2765785757320138e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/1/8",
+      "iterations": 17026825,
+      "real_time": 4.2386166947748293e+01,
+      "cpu_time": 4.2375663108065687e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/2/1",
+      "iterations": 43096283,
+      "real_time": 1.5777518098728892e+01,
+      "cpu_time": 1.5777532368627053e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/2/2",
+      "iterations": 28043074,
+      "real_time": 2.4808773925286083e+01,
+      "cpu_time": 2.4808763832383935e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/2/3",
+      "iterations": 20537676,
+      "real_time": 3.2383738596587712e+01,
+      "cpu_time": 3.2383167404140650e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/2/4",
+      "iterations": 16883825,
+      "real_time": 4.1009372165414604e+01,
+      "cpu_time": 4.1008065411719905e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/2/6",
+      "iterations": 11024664,
+      "real_time": 5.8886719720938295e+01,
+      "cpu_time": 5.8876352150046408e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/2/8",
+      "iterations": 9310368,
+      "real_time": 7.5767626156437032e+01,
+      "cpu_time": 7.5761774400326203e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/3/1",
+      "iterations": 33339525,
+      "real_time": 2.2024360966450818e+01,
+      "cpu_time": 2.2020649664324235e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/3/2",
+      "iterations": 20745351,
+      "real_time": 3.3640688990218322e+01,
+      "cpu_time": 3.3640741966717030e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/3/3",
+      "iterations": 15184579,
+      "real_time": 4.7369305992448858e+01,
+      "cpu_time": 4.7351921972943934e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/3/4",
+      "iterations": 11774006,
+      "real_time": 5.8984107456603738e+01,
+      "cpu_time": 5.8981454570349520e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/3/6",
+      "iterations": 8160791,
+      "real_time": 8.4328569250943218e+01,
+      "cpu_time": 8.4317316789508908e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/3/8",
+      "iterations": 6131476,
+      "real_time": 1.0972331784711314e+02,
+      "cpu_time": 1.0971143000478807e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/4/1",
+      "iterations": 27312916,
+      "real_time": 2.5933041459098458e+01,
+      "cpu_time": 2.5931614185758573e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/4/2",
+      "iterations": 15306067,
+      "real_time": 4.3585445437467484e+01,
+      "cpu_time": 4.3584808559901866e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/4/3",
+      "iterations": 11960496,
+      "real_time": 5.9213498838155310e+01,
+      "cpu_time": 5.9199133547639811e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/4/4",
+      "iterations": 9180809,
+      "real_time": 7.6254237082330079e+01,
+      "cpu_time": 7.6239250811118566e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/4/6",
+      "iterations": 6431459,
+      "real_time": 1.0985451155096177e+02,
+      "cpu_time": 1.0985190141149032e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/4/8",
+      "iterations": 4887415,
+      "real_time": 1.4419382576976491e+02,
+      "cpu_time": 1.4417682967376621e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/8/1",
+      "iterations": 13676416,
+      "real_time": 4.7890571114145779e+01,
+      "cpu_time": 4.7883305099816475e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/8/2",
+      "iterations": 8807801,
+      "real_time": 8.0382177334846844e+01,
+      "cpu_time": 8.0377837782663065e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/8/3",
+      "iterations": 5998183,
+      "real_time": 1.1687517603400644e+02,
+      "cpu_time": 1.1686455715005215e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/8/4",
+      "iterations": 4804755,
+      "real_time": 1.4730310453699278e+02,
+      "cpu_time": 1.4729970622850630e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/8/6",
+      "iterations": 3168984,
+      "real_time": 2.1250873814887458e+02,
+      "cpu_time": 2.1250880408357332e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/8/8",
+      "iterations": 2316745,
+      "real_time": 2.9031885426550616e+02,
+      "cpu_time": 2.9029047219266567e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/12/1",
+      "iterations": 10610884,
+      "real_time": 6.7091701507706901e+01,
+      "cpu_time": 6.7091394081776471e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/12/2",
+      "iterations": 6071698,
+      "real_time": 1.1987622769820872e+02,
+      "cpu_time": 1.1985032852425132e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/12/3",
+      "iterations": 4199286,
+      "real_time": 1.7022283193257698e+02,
+      "cpu_time": 1.7022036603364435e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/12/4",
+      "iterations": 3223341,
+      "real_time": 2.2084220750673069e+02,
+      "cpu_time": 2.2083049854173322e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/12/6",
+      "iterations": 2110258,
+      "real_time": 3.2486837439454968e+02,
+      "cpu_time": 3.2486785975931156e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/12/8",
+      "iterations": 1658882,
+      "real_time": 4.3248637153984356e+02,
+      "cpu_time": 4.3237433403940628e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/15/1",
+      "iterations": 8549096,
+      "real_time": 8.1755232245320613e+01,
+      "cpu_time": 8.1755310736949070e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/15/2",
+      "iterations": 4906771,
+      "real_time": 1.4219038120679400e+02,
+      "cpu_time": 1.4219045478177148e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/15/3",
+      "iterations": 3242032,
+      "real_time": 2.1243927882894664e+02,
+      "cpu_time": 2.1236866261651991e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/15/4",
+      "iterations": 2557479,
+      "real_time": 2.8195440313855738e+02,
+      "cpu_time": 2.8177435670049420e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/15/6",
+      "iterations": 1664534,
+      "real_time": 4.0540242255935544e+02,
+      "cpu_time": 4.0532124907033818e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/3/15/8",
+      "iterations": 1341844,
+      "real_time": 5.2444090298103026e+02,
+      "cpu_time": 5.2437690223306686e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/1/1",
+      "iterations": 62403609,
+      "real_time": 1.1623122679202432e+01,
+      "cpu_time": 1.1620882375569181e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/1/2",
+      "iterations": 39838824,
+      "real_time": 1.7179659544895046e+01,
+      "cpu_time": 1.7179648676375070e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/1/3",
+      "iterations": 32766167,
+      "real_time": 2.1615556255131441e+01,
+      "cpu_time": 2.1606860515603756e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/1/4",
+      "iterations": 24391349,
+      "real_time": 2.7071690169481549e+01,
+      "cpu_time": 2.7070335470169983e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/1/6",
+      "iterations": 18509753,
+      "real_time": 3.6619271630814140e+01,
+      "cpu_time": 3.6617452431698268e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/1/8",
+      "iterations": 14812088,
+      "real_time": 4.7171374350631837e+01,
+      "cpu_time": 4.7167489148051850e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/2/1",
+      "iterations": 36807816,
+      "real_time": 1.7987491516422690e+01,
+      "cpu_time": 1.7987483962645410e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/2/2",
+      "iterations": 23621436,
+      "real_time": 2.8243106264286279e+01,
+      "cpu_time": 2.8235285949593514e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/2/3",
+      "iterations": 18059529,
+      "real_time": 3.7942342518010385e+01,
+      "cpu_time": 3.7940911969518403e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/2/4",
+      "iterations": 13767061,
+      "real_time": 4.8099409165246634e+01,
+      "cpu_time": 4.8085208600441149e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/2/6",
+      "iterations": 10139637,
+      "real_time": 6.6952879276907154e+01,
+      "cpu_time": 6.6951805079415081e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/2/8",
+      "iterations": 8250728,
+      "real_time": 8.8170155535245442e+01,
+      "cpu_time": 8.8151372824304005e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/3/1",
+      "iterations": 29278168,
+      "real_time": 2.3159023406592443e+01,
+      "cpu_time": 2.3158996833409589e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/3/2",
+      "iterations": 17631620,
+      "real_time": 3.9443389771288025e+01,
+      "cpu_time": 3.9430863414704994e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/3/3",
+      "iterations": 12529758,
+      "real_time": 5.4492783743921215e+01,
+      "cpu_time": 5.4488362823931574e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/3/4",
+      "iterations": 10417752,
+      "real_time": 6.6576565565740083e+01,
+      "cpu_time": 6.6576647245975579e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/3/6",
+      "iterations": 7166110,
+      "real_time": 9.9976441742425465e+01,
+      "cpu_time": 9.9964694932119912e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/3/8",
+      "iterations": 5464481,
+      "real_time": 1.2876205205738952e+02,
+      "cpu_time": 1.2875696703859336e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/4/1",
+      "iterations": 23519688,
+      "real_time": 3.0345575244046909e+01,
+      "cpu_time": 3.0342069163502853e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/4/2",
+      "iterations": 14302118,
+      "real_time": 4.9426612122235376e+01,
+      "cpu_time": 4.9414918825307758e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/4/3",
+      "iterations": 10335459,
+      "real_time": 7.2294355962834658e+01,
+      "cpu_time": 7.2122873304418945e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/4/4",
+      "iterations": 6493205,
+      "real_time": 9.9755975976824729e+01,
+      "cpu_time": 9.9201857942263970e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/4/6",
+      "iterations": 4564305,
+      "real_time": 1.4452828283853472e+02,
+      "cpu_time": 1.4386439994697531e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/4/8",
+      "iterations": 3651520,
+      "real_time": 1.9601295598578233e+02,
+      "cpu_time": 1.9472247173779311e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/8/1",
+      "iterations": 11624238,
+      "real_time": 6.0146263001210585e+01,
+      "cpu_time": 6.0092627146829116e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/8/2",
+      "iterations": 6388727,
+      "real_time": 1.1215399483707569e+02,
+      "cpu_time": 1.1142470166592707e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/8/3",
+      "iterations": 4873871,
+      "real_time": 1.6555171444420100e+02,
+      "cpu_time": 1.6384717609472713e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/8/4",
+      "iterations": 3594075,
+      "real_time": 2.0919734646946930e+02,
+      "cpu_time": 2.0288057427848855e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/8/6",
+      "iterations": 2429594,
+      "real_time": 2.8568016052816324e+02,
+      "cpu_time": 2.8101032518191158e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/8/8",
+      "iterations": 1837989,
+      "real_time": 3.6725845149562946e+02,
+      "cpu_time": 3.6611372538137834e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/12/1",
+      "iterations": 8204695,
+      "real_time": 8.1424655276155079e+01,
+      "cpu_time": 8.1355979716493493e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/12/2",
+      "iterations": 4796985,
+      "real_time": 1.4769030756716896e+02,
+      "cpu_time": 1.4744532242648097e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/12/3",
+      "iterations": 3062104,
+      "real_time": 2.1750303906765461e+02,
+      "cpu_time": 2.1693645937564966e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/12/4",
+      "iterations": 2520624,
+      "real_time": 2.8551301858828515e+02,
+      "cpu_time": 2.8371427075199244e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/12/6",
+      "iterations": 1757372,
+      "real_time": 3.9824948044242757e+02,
+      "cpu_time": 3.9803126486596364e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/12/8",
+      "iterations": 1000000,
+      "real_time": 5.2905927901156247e+02,
+      "cpu_time": 5.2889500000003409e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/15/1",
+      "iterations": 7131796,
+      "real_time": 9.7810379170248950e+01,
+      "cpu_time": 9.7782662319559932e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/15/2",
+      "iterations": 4083847,
+      "real_time": 1.7354053151036834e+02,
+      "cpu_time": 1.7344307952771672e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/15/3",
+      "iterations": 2619378,
+      "real_time": 2.7425471238659838e+02,
+      "cpu_time": 2.7410744077410118e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/15/4",
+      "iterations": 1974891,
+      "real_time": 3.3991598773577073e+02,
+      "cpu_time": 3.3965165672437047e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/15/6",
+      "iterations": 1448622,
+      "real_time": 4.7448258004178871e+02,
+      "cpu_time": 4.7440671203394760e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/4/15/8",
+      "iterations": 1077006,
+      "real_time": 6.0661674584073819e+02,
+      "cpu_time": 6.0651286993756878e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/1/1",
+      "iterations": 51451294,
+      "real_time": 1.3556582503605545e+01,
+      "cpu_time": 1.3554275233581883e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/1/2",
+      "iterations": 34442208,
+      "real_time": 2.0249190380766187e+01,
+      "cpu_time": 2.0235955836513707e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/1/3",
+      "iterations": 27289172,
+      "real_time": 2.6551243473243439e+01,
+      "cpu_time": 2.6547635816873239e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/1/4",
+      "iterations": 21995218,
+      "real_time": 3.3027670287106289e+01,
+      "cpu_time": 3.3017949628870511e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/1/6",
+      "iterations": 15328322,
+      "real_time": 4.5239383604584916e+01,
+      "cpu_time": 4.5235805980591003e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/1/8",
+      "iterations": 12413989,
+      "real_time": 5.8476646226957911e+01,
+      "cpu_time": 5.8470568968607047e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/2/1",
+      "iterations": 32691338,
+      "real_time": 2.1089277898415219e+01,
+      "cpu_time": 2.1081149997593990e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/2/2",
+      "iterations": 21468112,
+      "real_time": 3.3973607689995347e+01,
+      "cpu_time": 3.3965119988195426e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/2/3",
+      "iterations": 15165072,
+      "real_time": 4.6516005137118221e+01,
+      "cpu_time": 4.6511285933889241e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/2/4",
+      "iterations": 12315921,
+      "real_time": 5.9578955238892931e+01,
+      "cpu_time": 5.9571103127403411e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/2/6",
+      "iterations": 8621858,
+      "real_time": 8.1341206143922918e+01,
+      "cpu_time": 8.1339196261410933e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/2/8",
+      "iterations": 6631048,
+      "real_time": 1.1145818444051133e+02,
+      "cpu_time": 1.1137621081917288e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/3/1",
+      "iterations": 23903757,
+      "real_time": 2.8763526841208261e+01,
+      "cpu_time": 2.8741716208041286e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/3/2",
+      "iterations": 14966123,
+      "real_time": 4.7201062097685181e+01,
+      "cpu_time": 4.7191714246902819e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/3/3",
+      "iterations": 10801136,
+      "real_time": 6.7185940348810547e+01,
+      "cpu_time": 6.7169971751122972e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/3/4",
+      "iterations": 8389362,
+      "real_time": 8.3364553351704686e+01,
+      "cpu_time": 8.3363907767948348e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/3/6",
+      "iterations": 5463884,
+      "real_time": 1.2765818125495605e+02,
+      "cpu_time": 1.2763740957896786e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/3/8",
+      "iterations": 4501260,
+      "real_time": 1.5720641530327470e+02,
+      "cpu_time": 1.5720176128461773e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/4/1",
+      "iterations": 19483845,
+      "real_time": 3.6366074509964967e+01,
+      "cpu_time": 3.6343288503885347e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/4/2",
+      "iterations": 11406782,
+      "real_time": 6.0186339667239302e+01,
+      "cpu_time": 6.0182968342866729e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/4/3",
+      "iterations": 8338197,
+      "real_time": 8.5351657319080601e+01,
+      "cpu_time": 8.5340631793654168e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/4/4",
+      "iterations": 6058246,
+      "real_time": 1.0923495844860254e+02,
+      "cpu_time": 1.0923343159058757e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/4/6",
+      "iterations": 4363056,
+      "real_time": 1.6283051558762142e+02,
+      "cpu_time": 1.6282990637755179e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/4/8",
+      "iterations": 3260667,
+      "real_time": 2.2207680725963471e+02,
+      "cpu_time": 2.2203064587705936e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/8/1",
+      "iterations": 11241368,
+      "real_time": 6.3151522032228591e+01,
+      "cpu_time": 6.3135287449004743e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/8/2",
+      "iterations": 6031415,
+      "real_time": 1.1361559816589077e+02,
+      "cpu_time": 1.1358727595430599e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/8/3",
+      "iterations": 4288112,
+      "real_time": 1.6079261434146306e+02,
+      "cpu_time": 1.6078684511972247e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/8/4",
+      "iterations": 3241987,
+      "real_time": 2.1923580079367372e+02,
+      "cpu_time": 2.1919489498261038e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/8/6",
+      "iterations": 2082937,
+      "real_time": 3.2217429718655586e+02,
+      "cpu_time": 3.2217681091649717e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/8/8",
+      "iterations": 1690944,
+      "real_time": 4.2858118184490007e+02,
+      "cpu_time": 4.2856652851899872e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/12/1",
+      "iterations": 7393091,
+      "real_time": 9.2698583996211596e+01,
+      "cpu_time": 9.2666247446426070e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/12/2",
+      "iterations": 4260836,
+      "real_time": 1.6870986257641803e+02,
+      "cpu_time": 1.6869811464230420e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/12/3",
+      "iterations": 2726122,
+      "real_time": 2.4784465480026338e+02,
+      "cpu_time": 2.4781356080175183e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/12/4",
+      "iterations": 2133932,
+      "real_time": 3.2471268155478242e+02,
+      "cpu_time": 3.2467716871952445e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/12/6",
+      "iterations": 1448394,
+      "real_time": 4.7851793436190400e+02,
+      "cpu_time": 4.7846511377426896e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/12/8",
+      "iterations": 1137120,
+      "real_time": 6.1080555608747670e+02,
+      "cpu_time": 6.1080536794706779e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/15/1",
+      "iterations": 6325054,
+      "real_time": 1.1604534015976786e+02,
+      "cpu_time": 1.1599711243572453e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/15/2",
+      "iterations": 3266251,
+      "real_time": 2.1890909332904664e+02,
+      "cpu_time": 2.1888091270389398e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/15/3",
+      "iterations": 2340417,
+      "real_time": 3.0651368499006946e+02,
+      "cpu_time": 3.0642573524291839e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/15/4",
+      "iterations": 1737520,
+      "real_time": 3.9936560269846456e+02,
+      "cpu_time": 3.9928633915007259e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/15/6",
+      "iterations": 1217984,
+      "real_time": 5.9439453058201616e+02,
+      "cpu_time": 5.9431815196256832e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/6/15/8",
+      "iterations": 931830,
+      "real_time": 7.5877669842632963e+02,
+      "cpu_time": 7.5873818185715822e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/1/1",
+      "iterations": 46516573,
+      "real_time": 1.4977390101744058e+01,
+      "cpu_time": 1.4972341148175227e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/1/2",
+      "iterations": 29132194,
+      "real_time": 2.2459752739197551e+01,
+      "cpu_time": 2.2457731813814231e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/1/3",
+      "iterations": 23066226,
+      "real_time": 3.0230697554256871e+01,
+      "cpu_time": 3.0222065803047059e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/1/4",
+      "iterations": 18471704,
+      "real_time": 3.6375699396191663e+01,
+      "cpu_time": 3.6374229470113249e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/1/6",
+      "iterations": 10000000,
+      "real_time": 5.0868511397857219e+01,
+      "cpu_time": 5.0868200000007846e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/1/8",
+      "iterations": 10894094,
+      "real_time": 6.6185205023448887e+01,
+      "cpu_time": 6.6171633914671787e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/2/1",
+      "iterations": 29674848,
+      "real_time": 2.4224997445737838e+01,
+      "cpu_time": 2.4216804749935658e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/2/2",
+      "iterations": 18571827,
+      "real_time": 3.8720325686354229e+01,
+      "cpu_time": 3.8710623354393242e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/2/3",
+      "iterations": 13801806,
+      "real_time": 5.1563614210871776e+01,
+      "cpu_time": 5.1561730399628566e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/2/4",
+      "iterations": 10916520,
+      "real_time": 6.5205774635663829e+01,
+      "cpu_time": 6.5198524804604475e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/2/6",
+      "iterations": 7301783,
+      "real_time": 9.5048175634761677e+01,
+      "cpu_time": 9.5020627153679243e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/2/8",
+      "iterations": 5840975,
+      "real_time": 1.2915930182068388e+02,
+      "cpu_time": 1.2900209297249791e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/3/1",
+      "iterations": 21209486,
+      "real_time": 3.7087220640826175e+01,
+      "cpu_time": 3.6957567005632619e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/3/2",
+      "iterations": 9589435,
+      "real_time": 5.4345414620661082e+01,
+      "cpu_time": 5.4340114928559998e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/3/3",
+      "iterations": 9362920,
+      "real_time": 7.8291851360821411e+01,
+      "cpu_time": 7.8260841703224330e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/3/4",
+      "iterations": 7038359,
+      "real_time": 9.6070806847655447e+01,
+      "cpu_time": 9.6065432297503520e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/3/6",
+      "iterations": 5137238,
+      "real_time": 1.4327072583245283e+02,
+      "cpu_time": 1.4320730322402366e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/3/8",
+      "iterations": 3697092,
+      "real_time": 1.9392137144099195e+02,
+      "cpu_time": 1.9382585015465978e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/4/1",
+      "iterations": 16382623,
+      "real_time": 4.3238417985279668e+01,
+      "cpu_time": 4.3192594983110659e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/4/2",
+      "iterations": 10265284,
+      "real_time": 7.3805508839851271e+01,
+      "cpu_time": 7.3513601766887291e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/4/3",
+      "iterations": 6711088,
+      "real_time": 9.7472410299774424e+01,
+      "cpu_time": 9.7390765848993198e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/4/4",
+      "iterations": 5544906,
+      "real_time": 1.2865057497267418e+02,
+      "cpu_time": 1.2861083668506240e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/4/6",
+      "iterations": 3905509,
+      "real_time": 1.8542579443530406e+02,
+      "cpu_time": 1.8540067376621201e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/4/8",
+      "iterations": 2729811,
+      "real_time": 2.6029918041362305e+02,
+      "cpu_time": 2.6025904357483864e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/8/1",
+      "iterations": 8843744,
+      "real_time": 7.5617499890368904e+01,
+      "cpu_time": 7.5583825131078228e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/8/2",
+      "iterations": 5366041,
+      "real_time": 1.3194637276852586e+02,
+      "cpu_time": 1.3192892115434097e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/8/3",
+      "iterations": 3604866,
+      "real_time": 1.9550793538299345e+02,
+      "cpu_time": 1.9549436789050722e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/8/4",
+      "iterations": 2839284,
+      "real_time": 2.5175813584366495e+02,
+      "cpu_time": 2.5172261739227918e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/8/6",
+      "iterations": 1987287,
+      "real_time": 3.6079913467532680e+02,
+      "cpu_time": 3.6075916563636036e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/8/8",
+      "iterations": 1484979,
+      "real_time": 4.8329740019174761e+02,
+      "cpu_time": 4.8325329853153823e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/12/1",
+      "iterations": 6513627,
+      "real_time": 1.0758186768089902e+02,
+      "cpu_time": 1.0752427180739905e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/12/2",
+      "iterations": 3404090,
+      "real_time": 1.9945774643077795e+02,
+      "cpu_time": 1.9941834675345973e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/12/3",
+      "iterations": 2538715,
+      "real_time": 2.8033430340346098e+02,
+      "cpu_time": 2.8028983166679978e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/12/4",
+      "iterations": 1962808,
+      "real_time": 3.6131444033815620e+02,
+      "cpu_time": 3.6130380556836366e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/12/6",
+      "iterations": 1330849,
+      "real_time": 5.3696716454749549e+02,
+      "cpu_time": 5.3680019295954082e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/12/8",
+      "iterations": 938841,
+      "real_time": 7.0354190640354318e+02,
+      "cpu_time": 7.0351848715594713e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/15/1",
+      "iterations": 5313496,
+      "real_time": 1.2908906454210609e+02,
+      "cpu_time": 1.2904761761371651e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/15/2",
+      "iterations": 2750783,
+      "real_time": 2.3788409413018474e+02,
+      "cpu_time": 2.3787990546689804e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/15/3",
+      "iterations": 2038748,
+      "real_time": 3.3997233348810170e+02,
+      "cpu_time": 3.3994245487917595e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/15/4",
+      "iterations": 1511667,
+      "real_time": 4.4431510644682237e+02,
+      "cpu_time": 4.4431280169507704e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/15/6",
+      "iterations": 1036361,
+      "real_time": 6.5201146125101457e+02,
+      "cpu_time": 6.5199095681905658e+02,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeMatrixMultiplyDynamic/8/15/8",
+      "iterations": 799114,
+      "real_time": 8.9333044218943689e+02,
+      "cpu_time": 8.9311412389216093e+02,
+      "time_unit": "ns"
+    }
+  ]
+}
diff --git a/internal/ceres/benchmarks/macbook-pro-2014-small_blas_gemv_benchmark.json b/internal/ceres/benchmarks/macbook-pro-2014-small_blas_gemv_benchmark.json
new file mode 100644
index 0000000..a8a7795
--- /dev/null
+++ b/internal/ceres/benchmarks/macbook-pro-2014-small_blas_gemv_benchmark.json
@@ -0,0 +1,599 @@
+{
+  "context": {
+    "date": "2018-03-23 13:34:44",
+    "num_cpus": 8,
+    "mhz_per_cpu": 2200,
+    "cpu_scaling_enabled": false,
+    "library_build_type": "release"
+  },
+  "benchmarks": [
+    {
+      "name": "BM_MatrixVectorMultiply/1/1",
+      "iterations": 75370933,
+      "real_time": 8.9246668610270454e+00,
+      "cpu_time": 8.9241564782009526e+00,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/1/2",
+      "iterations": 79276096,
+      "real_time": 9.1768834835134339e+00,
+      "cpu_time": 9.1733452666488553e+00,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/1/3",
+      "iterations": 86461383,
+      "real_time": 8.1339961325563639e+00,
+      "cpu_time": 8.1315146208105382e+00,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/1/4",
+      "iterations": 80784766,
+      "real_time": 8.6175041966102217e+00,
+      "cpu_time": 8.6169092821287645e+00,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/1/8",
+      "iterations": 62071595,
+      "real_time": 1.1623699777324967e+01,
+      "cpu_time": 1.1622159862333158e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/1/12",
+      "iterations": 46187548,
+      "real_time": 1.4648812380771005e+01,
+      "cpu_time": 1.4647497632911776e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/1/15",
+      "iterations": 43216546,
+      "real_time": 1.5979603784946462e+01,
+      "cpu_time": 1.5978764244602067e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/2/1",
+      "iterations": 85460694,
+      "real_time": 8.4557136173013969e+00,
+      "cpu_time": 8.4517333781539410e+00,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/2/2",
+      "iterations": 73809298,
+      "real_time": 9.3563609147448190e+00,
+      "cpu_time": 9.3563550760230729e+00,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/2/3",
+      "iterations": 60910879,
+      "real_time": 1.1561664410556345e+01,
+      "cpu_time": 1.1560660616964670e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/2/4",
+      "iterations": 57011614,
+      "real_time": 1.2235077136567469e+01,
+      "cpu_time": 1.2233770473503897e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/2/8",
+      "iterations": 37294746,
+      "real_time": 1.7008151843698663e+01,
+      "cpu_time": 1.7008079368605955e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/2/12",
+      "iterations": 36096615,
+      "real_time": 2.0234282769587981e+01,
+      "cpu_time": 2.0232423455772722e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/2/15",
+      "iterations": 29620477,
+      "real_time": 2.4192867556653738e+01,
+      "cpu_time": 2.4189515921705162e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/3/1",
+      "iterations": 70819380,
+      "real_time": 1.0241319961826111e+01,
+      "cpu_time": 1.0236957736709932e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/3/2",
+      "iterations": 49055678,
+      "real_time": 1.4430740841341054e+01,
+      "cpu_time": 1.4428604982281607e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/3/3",
+      "iterations": 46364678,
+      "real_time": 1.4935508190628967e+01,
+      "cpu_time": 1.4931711593036367e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/3/4",
+      "iterations": 41730007,
+      "real_time": 1.6495830781686326e+01,
+      "cpu_time": 1.6495492080794520e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/3/8",
+      "iterations": 32099490,
+      "real_time": 2.1899212414803351e+01,
+      "cpu_time": 2.1896111122014723e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/3/12",
+      "iterations": 26976615,
+      "real_time": 2.5065036735486377e+01,
+      "cpu_time": 2.5063782094232344e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/3/15",
+      "iterations": 23158717,
+      "real_time": 3.0120809500024972e+01,
+      "cpu_time": 3.0119803268894366e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/4/1",
+      "iterations": 54510341,
+      "real_time": 1.2223535017736792e+01,
+      "cpu_time": 1.2219974921822656e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/4/2",
+      "iterations": 45100187,
+      "real_time": 1.5694088963631776e+01,
+      "cpu_time": 1.5693859539872824e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/4/3",
+      "iterations": 39166098,
+      "real_time": 1.7640497530917788e+01,
+      "cpu_time": 1.7634996470672178e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/4/4",
+      "iterations": 34750664,
+      "real_time": 2.1339453860003271e+01,
+      "cpu_time": 2.1335160674915432e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/4/8",
+      "iterations": 30043950,
+      "real_time": 2.3893673667994594e+01,
+      "cpu_time": 2.3885541015745137e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/4/12",
+      "iterations": 21692445,
+      "real_time": 3.2236621367456465e+01,
+      "cpu_time": 3.2233111574098835e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/4/15",
+      "iterations": 17051627,
+      "real_time": 3.9894564547043721e+01,
+      "cpu_time": 3.9893026043790357e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/6/1",
+      "iterations": 43622404,
+      "real_time": 1.6067802864650357e+01,
+      "cpu_time": 1.6063832703947249e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/6/2",
+      "iterations": 33862065,
+      "real_time": 1.9549917288877438e+01,
+      "cpu_time": 1.9548601067300531e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/6/3",
+      "iterations": 32245731,
+      "real_time": 2.1789015355885351e+01,
+      "cpu_time": 2.1776557027037121e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/6/4",
+      "iterations": 31862790,
+      "real_time": 2.2554395395088299e+01,
+      "cpu_time": 2.2547178071976678e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/6/8",
+      "iterations": 20659998,
+      "real_time": 3.2157974121069905e+01,
+      "cpu_time": 3.2154552967527074e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/6/12",
+      "iterations": 15551719,
+      "real_time": 4.5013863291655184e+01,
+      "cpu_time": 4.4924037014814900e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/6/15",
+      "iterations": 12874274,
+      "real_time": 5.4008553339302892e+01,
+      "cpu_time": 5.4001647005493055e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/8/1",
+      "iterations": 35825968,
+      "real_time": 1.9189763050020964e+01,
+      "cpu_time": 1.9188037012705493e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/8/2",
+      "iterations": 28860743,
+      "real_time": 2.3957427986537024e+01,
+      "cpu_time": 2.3950526845410646e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/8/3",
+      "iterations": 25577503,
+      "real_time": 2.6716626640397276e+01,
+      "cpu_time": 2.6707845562563413e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/8/4",
+      "iterations": 27263982,
+      "real_time": 2.6871541249702378e+01,
+      "cpu_time": 2.6865224602921419e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/8/8",
+      "iterations": 18390176,
+      "real_time": 3.8055934538584296e+01,
+      "cpu_time": 3.8052327503554217e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/8/12",
+      "iterations": 13196592,
+      "real_time": 5.7004881560951908e+01,
+      "cpu_time": 5.7001989604588779e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixVectorMultiply/8/15",
+      "iterations": 10753844,
+      "real_time": 6.5649689450107800e+01,
+      "cpu_time": 6.5649734178773684e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/1/1",
+      "iterations": 87950748,
+      "real_time": 8.1611993221244958e+00,
+      "cpu_time": 8.1607606111547870e+00,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/1/2",
+      "iterations": 82828474,
+      "real_time": 8.1970134683295868e+00,
+      "cpu_time": 8.1947664519329049e+00,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/1/3",
+      "iterations": 79647729,
+      "real_time": 9.1236429599833766e+00,
+      "cpu_time": 9.1225827669236015e+00,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/1/4",
+      "iterations": 60000343,
+      "real_time": 1.1501341685749610e+01,
+      "cpu_time": 1.1497550939000405e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/1/8",
+      "iterations": 42555778,
+      "real_time": 1.6328523285845055e+01,
+      "cpu_time": 1.6328429009099469e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/1/12",
+      "iterations": 34690560,
+      "real_time": 2.0900962193107770e+01,
+      "cpu_time": 2.0898105997712126e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/1/15",
+      "iterations": 22984807,
+      "real_time": 3.0876962381519814e+01,
+      "cpu_time": 3.0874873128149208e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/2/1",
+      "iterations": 82616342,
+      "real_time": 8.4971062507129691e+00,
+      "cpu_time": 8.4946026780028845e+00,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/2/2",
+      "iterations": 66217648,
+      "real_time": 1.0427153785878781e+01,
+      "cpu_time": 1.0426691687992145e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/2/3",
+      "iterations": 52740629,
+      "real_time": 1.3438166219244954e+01,
+      "cpu_time": 1.3437818498524338e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/2/4",
+      "iterations": 44820940,
+      "real_time": 1.6290960697977020e+01,
+      "cpu_time": 1.6288391095769057e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/2/8",
+      "iterations": 28365116,
+      "real_time": 2.4753231118037846e+01,
+      "cpu_time": 2.4752586945175814e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/2/12",
+      "iterations": 20152990,
+      "real_time": 3.3992543981506721e+01,
+      "cpu_time": 3.3989646201382811e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/2/15",
+      "iterations": 17477847,
+      "real_time": 4.1158645625261137e+01,
+      "cpu_time": 4.1148031562468589e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/3/1",
+      "iterations": 85071217,
+      "real_time": 8.6638082650656063e+00,
+      "cpu_time": 8.6620601654259222e+00,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/3/2",
+      "iterations": 56597671,
+      "real_time": 1.2288259387556581e+01,
+      "cpu_time": 1.2287025026171113e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/3/3",
+      "iterations": 44866043,
+      "real_time": 1.6032917745159793e+01,
+      "cpu_time": 1.6028580902488002e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/3/4",
+      "iterations": 40158108,
+      "real_time": 1.7255109055033969e+01,
+      "cpu_time": 1.7254896570326416e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/3/8",
+      "iterations": 25254165,
+      "real_time": 2.8276512487003924e+01,
+      "cpu_time": 2.8273039318464768e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/3/12",
+      "iterations": 18068759,
+      "real_time": 3.9514967575238380e+01,
+      "cpu_time": 3.9513781771067336e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/3/15",
+      "iterations": 14997033,
+      "real_time": 4.7088586853686969e+01,
+      "cpu_time": 4.7085713554140796e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/4/1",
+      "iterations": 78983594,
+      "real_time": 8.9500300024881376e+00,
+      "cpu_time": 8.9485925393569730e+00,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/4/2",
+      "iterations": 50655629,
+      "real_time": 1.3881427511987113e+01,
+      "cpu_time": 1.3880806810236136e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/4/3",
+      "iterations": 42322156,
+      "real_time": 1.6617042784854270e+01,
+      "cpu_time": 1.6616851939206612e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/4/4",
+      "iterations": 35709549,
+      "real_time": 1.9691253563928413e+01,
+      "cpu_time": 1.9687591125835635e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/4/8",
+      "iterations": 20404356,
+      "real_time": 3.3671754796556790e+01,
+      "cpu_time": 3.3668153996136844e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/4/12",
+      "iterations": 15090728,
+      "real_time": 4.7125273353021399e+01,
+      "cpu_time": 4.7122776316689396e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/4/15",
+      "iterations": 11336950,
+      "real_time": 6.2453472226796620e+01,
+      "cpu_time": 6.2451805820789019e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/6/1",
+      "iterations": 65892276,
+      "real_time": 1.0683369458878103e+01,
+      "cpu_time": 1.0683331078137206e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/6/2",
+      "iterations": 45151386,
+      "real_time": 1.5743454386474488e+01,
+      "cpu_time": 1.5741886638873094e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/6/3",
+      "iterations": 35555194,
+      "real_time": 2.0272604644448467e+01,
+      "cpu_time": 2.0265815453011015e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/6/4",
+      "iterations": 28844688,
+      "real_time": 2.4899265682219198e+01,
+      "cpu_time": 2.4896646481320971e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/6/8",
+      "iterations": 16677944,
+      "real_time": 4.2613494320617384e+01,
+      "cpu_time": 4.2610288174610147e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/6/12",
+      "iterations": 10657572,
+      "real_time": 6.7212158173757643e+01,
+      "cpu_time": 6.7206583263055407e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/6/15",
+      "iterations": 8660580,
+      "real_time": 8.0843434848483710e+01,
+      "cpu_time": 8.0843430809486151e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/8/1",
+      "iterations": 57066458,
+      "real_time": 1.2319644088657304e+01,
+      "cpu_time": 1.2319338270477425e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/8/2",
+      "iterations": 38263912,
+      "real_time": 1.8003573181401965e+01,
+      "cpu_time": 1.7997610908158087e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/8/3",
+      "iterations": 29869088,
+      "real_time": 2.4137524754083010e+01,
+      "cpu_time": 2.4137328866552775e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/8/4",
+      "iterations": 22616613,
+      "real_time": 3.0444019799836454e+01,
+      "cpu_time": 3.0442931485806785e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/8/8",
+      "iterations": 12552902,
+      "real_time": 5.4102999925791671e+01,
+      "cpu_time": 5.4099761154830723e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/8/12",
+      "iterations": 9204229,
+      "real_time": 7.7715734577381653e+01,
+      "cpu_time": 7.7705802408872202e+01,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_MatrixTransposeVectorMultiply/8/15",
+      "iterations": 7493764,
+      "real_time": 9.3483444364895007e+01,
+      "cpu_time": 9.3483461715635883e+01,
+      "time_unit": "ns"
+    }
+  ]
+}
diff --git a/internal/ceres/blas.cc b/internal/ceres/blas.cc
new file mode 100644
index 0000000..3ba63bb
--- /dev/null
+++ b/internal/ceres/blas.cc
@@ -0,0 +1,81 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/blas.h"
+#include "ceres/internal/port.h"
+#include "glog/logging.h"
+
+#ifndef CERES_NO_LAPACK
+extern "C" void dsyrk_(char* uplo,
+                       char* trans,
+                       int* n,
+                       int* k,
+                       double* alpha,
+                       double* a,
+                       int* lda,
+                       double* beta,
+                       double* c,
+                       int* ldc);
+#endif
+
+namespace ceres {
+namespace internal {
+
+void BLAS::SymmetricRankKUpdate(int num_rows,
+                                int num_cols,
+                                const double* a,
+                                bool transpose,
+                                double alpha,
+                                double beta,
+                                double* c) {
+#ifdef CERES_NO_LAPACK
+  LOG(FATAL) << "Ceres was built without a BLAS library.";
+#else
+  // Map the arguments onto the Fortran dsyrk interface: only the lower
+  // triangle of c is referenced and updated, n is the order of the symmetric
+  // output c, and k is the inner dimension of the product.
+  char uplo = 'L';
+  char trans = transpose ? 'T' : 'N';
+  int n = transpose ? num_cols : num_rows;
+  int k = transpose ? num_rows : num_cols;
+  int lda = k;
+  int ldc = n;
+  dsyrk_(&uplo,
+         &trans,
+         &n,
+         &k,
+         &alpha,
+         const_cast<double*>(a),
+         &lda,
+         &beta,
+         c,
+         &ldc);
+#endif
+}
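+
+// For orientation only (not part of the upstream file): with transpose = true,
+// alpha = 1 and beta = 0, the call above computes the same lower triangle as
+// this Eigen sketch:
+//
+//   Eigen::Map<const Eigen::MatrixXd> A(a, num_rows, num_cols);
+//   Eigen::Map<Eigen::MatrixXd> C(c, num_cols, num_cols);
+//   C.setZero();
+//   C.selfadjointView<Eigen::Lower>().rankUpdate(A.transpose());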
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/blas.h b/internal/ceres/blas.h
new file mode 100644
index 0000000..a43301c
--- /dev/null
+++ b/internal/ceres/blas.h
@@ -0,0 +1,57 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Wrapper functions around BLAS functions.
+
+#ifndef CERES_INTERNAL_BLAS_H_
+#define CERES_INTERNAL_BLAS_H_
+
+namespace ceres {
+namespace internal {
+
+class BLAS {
+ public:
+  // transpose = true  : c = alpha * a'a + beta * c;
+  // transpose = false : c = alpha * aa' + beta * c;
+  //
+  // Assumes column major matrices.
+  static void SymmetricRankKUpdate(int num_rows,
+                                   int num_cols,
+                                   const double* a,
+                                   bool transpose,
+                                   double alpha,
+                                   double beta,
+                                   double* c);
+};
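+
+// A minimal usage sketch (illustrative only; the values below are arbitrary).
+// With transpose = true and 'a' stored column major as a 3x2 matrix, the call
+// accumulates the 2x2 product a' * a into c:
+//
+//   double a[6] = {1, 2, 3, 4, 5, 6};  // 3 rows, 2 cols, column major.
+//   double c[4] = {0, 0, 0, 0};        // 2x2 result, column major.
+//   BLAS::SymmetricRankKUpdate(3, 2, a, true, 1.0, 0.0, c);
+//
+// The underlying DSYRK call only writes one triangle of the symmetric result,
+// so callers should not rely on both triangles of c being filled in.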
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_BLAS_H_
diff --git a/internal/ceres/block_evaluate_preparer.cc b/internal/ceres/block_evaluate_preparer.cc
new file mode 100644
index 0000000..59c0d3e
--- /dev/null
+++ b/internal/ceres/block_evaluate_preparer.cc
@@ -0,0 +1,83 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include "ceres/block_evaluate_preparer.h"
+
+#include <vector>
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/casts.h"
+#include "ceres/parameter_block.h"
+#include "ceres/residual_block.h"
+#include "ceres/sparse_matrix.h"
+
+namespace ceres {
+namespace internal {
+
+void BlockEvaluatePreparer::Init(int const* const* jacobian_layout,
+                                 int max_derivatives_per_residual_block) {
+  jacobian_layout_ = jacobian_layout;
+  scratch_evaluate_preparer_.Init(max_derivatives_per_residual_block);
+}
+
+// Point the jacobian blocks directly into the block sparse matrix.
+void BlockEvaluatePreparer::Prepare(const ResidualBlock* residual_block,
+                                    int residual_block_index,
+                                    SparseMatrix* jacobian,
+                                    double** jacobians) {
+  // If the overall jacobian is not available, use the scratch space.
+  if (jacobian == NULL) {
+    scratch_evaluate_preparer_.Prepare(residual_block,
+                                       residual_block_index,
+                                       jacobian,
+                                       jacobians);
+    return;
+  }
+
+  double* jacobian_values =
+      down_cast<BlockSparseMatrix*>(jacobian)->mutable_values();
+
+  const int* jacobian_block_offset = jacobian_layout_[residual_block_index];
+  const int num_parameter_blocks = residual_block->NumParameterBlocks();
+  for (int j = 0; j < num_parameter_blocks; ++j) {
+    if (!residual_block->parameter_blocks()[j]->IsConstant()) {
+      jacobians[j] = jacobian_values + *jacobian_block_offset;
+
+      // The jacobian_block_offset can't be indexed with 'j' since the code
+      // that creates the layout strips out any blocks for inactive
+      // parameters. Instead, bump the pointer for active parameters only.
+      jacobian_block_offset++;
+    } else {
+      jacobians[j] = NULL;
+    }
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/block_evaluate_preparer.h b/internal/ceres/block_evaluate_preparer.h
new file mode 100644
index 0000000..4378689
--- /dev/null
+++ b/internal/ceres/block_evaluate_preparer.h
@@ -0,0 +1,77 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// An evaluate preparer which puts the evaluated jacobian blocks directly into
+// their final resting place in an overall block sparse matrix.
+// The evaluator takes care to avoid evaluating the jacobian for fixed
+// parameters.
+
+#ifndef CERES_INTERNAL_BLOCK_EVALUATE_PREPARER_H_
+#define CERES_INTERNAL_BLOCK_EVALUATE_PREPARER_H_
+
+#include "ceres/scratch_evaluate_preparer.h"
+
+namespace ceres {
+namespace internal {
+
+class ResidualBlock;
+class SparseMatrix;
+
+class BlockEvaluatePreparer {
+ public:
+  // Using Init() instead of a constructor allows for allocating this structure
+  // with new[]. This is because C++ doesn't allow passing arguments to objects
+  // constructed with new[] (as opposed to plain 'new').
+  void Init(int const* const* jacobian_layout,
+            int max_derivatives_per_residual_block);
+
+  // EvaluatePreparer interface
+
+  // Point the jacobian blocks directly into the block sparse matrix, if
+  // jacobian is non-null. Otherwise, uses an internal per-thread buffer to
+  // store the jacobians temporarily.
+  void Prepare(const ResidualBlock* residual_block,
+               int residual_block_index,
+               SparseMatrix* jacobian,
+               double** jacobians);
+
+ private:
+  int const* const* jacobian_layout_;
+
+  // For the case that the overall jacobian is not available, but the
+  // individual jacobians are requested, use a pass-through scratch evaluate
+  // preparer.
+  ScratchEvaluatePreparer scratch_evaluate_preparer_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_BLOCK_EVALUATE_PREPARER_H_
diff --git a/internal/ceres/block_jacobi_preconditioner.cc b/internal/ceres/block_jacobi_preconditioner.cc
new file mode 100644
index 0000000..772c7af
--- /dev/null
+++ b/internal/ceres/block_jacobi_preconditioner.cc
@@ -0,0 +1,105 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include "ceres/block_jacobi_preconditioner.h"
+
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/block_structure.h"
+#include "ceres/block_random_access_diagonal_matrix.h"
+#include "ceres/casts.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+BlockJacobiPreconditioner::BlockJacobiPreconditioner(
+    const BlockSparseMatrix& A) {
+  const CompressedRowBlockStructure* bs = A.block_structure();
+  std::vector<int> blocks(bs->cols.size());
+  for (int i = 0; i < blocks.size(); ++i) {
+    blocks[i] = bs->cols[i].size;
+  }
+
+  m_.reset(new BlockRandomAccessDiagonalMatrix(blocks));
+}
+
+BlockJacobiPreconditioner::~BlockJacobiPreconditioner() {}
+
+bool BlockJacobiPreconditioner::UpdateImpl(const BlockSparseMatrix& A,
+                                           const double* D) {
+  const CompressedRowBlockStructure* bs = A.block_structure();
+  const double* values = A.values();
+  m_->SetZero();
+  for (int i = 0; i < bs->rows.size(); ++i) {
+    const int row_block_size = bs->rows[i].block.size;
+    const std::vector<Cell>& cells = bs->rows[i].cells;
+    for (int j = 0; j < cells.size(); ++j) {
+      const int block_id = cells[j].block_id;
+      const int col_block_size = bs->cols[block_id].size;
+
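+      // Accumulate this cell's contribution, b' * b, into the
+      // (block_id, block_id) diagonal block of A' A.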
+      int r, c, row_stride, col_stride;
+      CellInfo* cell_info = m_->GetCell(block_id, block_id,
+                                        &r, &c,
+                                        &row_stride, &col_stride);
+      MatrixRef m(cell_info->values, row_stride, col_stride);
+      ConstMatrixRef b(values + cells[j].position,
+                       row_block_size,
+                       col_block_size);
+      m.block(r, c, col_block_size, col_block_size) += b.transpose() * b;
+    }
+  }
+
+  if (D != NULL) {
+    // Add the diagonal.
+    int position = 0;
+    for (int i = 0; i < bs->cols.size(); ++i) {
+      const int block_size = bs->cols[i].size;
+      int r, c, row_stride, col_stride;
+      CellInfo* cell_info = m_->GetCell(i, i,
+                                        &r, &c,
+                                        &row_stride, &col_stride);
+      MatrixRef m(cell_info->values, row_stride, col_stride);
+      m.block(r, c, block_size, block_size).diagonal() +=
+          ConstVectorRef(D + position, block_size).array().square().matrix();
+      position += block_size;
+    }
+  }
+
+  m_->Invert();
+  return true;
+}
+
+void BlockJacobiPreconditioner::RightMultiply(const double* x,
+                                              double* y) const {
+  m_->RightMultiply(x, y);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/block_jacobi_preconditioner.h b/internal/ceres/block_jacobi_preconditioner.h
new file mode 100644
index 0000000..a6d19e8
--- /dev/null
+++ b/internal/ceres/block_jacobi_preconditioner.h
@@ -0,0 +1,78 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#ifndef CERES_INTERNAL_BLOCK_JACOBI_PRECONDITIONER_H_
+#define CERES_INTERNAL_BLOCK_JACOBI_PRECONDITIONER_H_
+
+#include <memory>
+#include "ceres/block_random_access_diagonal_matrix.h"
+#include "ceres/preconditioner.h"
+
+namespace ceres {
+namespace internal {
+
+class BlockSparseMatrix;
+struct CompressedRowBlockStructure;
+
+// A block Jacobi preconditioner. This is intended for use with
+// conjugate gradients, or other iterative symmetric solvers. To use
+// the preconditioner, create one by passing a BlockSparseMatrix "A"
+// to the constructor. This fixes the sparsity pattern to the pattern
+// of the matrix A^TA.
+//
+// Before each use of the preconditioner in a solve with conjugate gradients,
+// update the matrix by running Update(A, D). The values of the matrix A are
+// inspected to construct the preconditioner. The vector D is applied as the
+// D^TD diagonal term.
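+//
+// A minimal sketch of that workflow (illustrative only; A, D, x and y are
+// assumed to be set up by the caller, with D possibly NULL):
+//
+//   BlockJacobiPreconditioner preconditioner(A);
+//   preconditioner.Update(A, D);         // Rebuild from the current A and D.
+//   preconditioner.RightMultiply(x, y);  // Accumulate the preconditioned
+//                                        // vector into y.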
+class BlockJacobiPreconditioner : public BlockSparseMatrixPreconditioner {
+ public:
+  // A must remain valid while the BlockJacobiPreconditioner is.
+  explicit BlockJacobiPreconditioner(const BlockSparseMatrix& A);
+  BlockJacobiPreconditioner(const BlockJacobiPreconditioner&) = delete;
+  void operator=(const BlockJacobiPreconditioner&) = delete;
+
+  virtual ~BlockJacobiPreconditioner();
+
+  // Preconditioner interface
+  virtual void RightMultiply(const double* x, double* y) const;
+  virtual int num_rows() const { return m_->num_rows(); }
+  virtual int num_cols() const { return m_->num_rows(); }
+  const BlockRandomAccessDiagonalMatrix& matrix() const { return *m_; }
+
+ private:
+  virtual bool UpdateImpl(const BlockSparseMatrix& A, const double* D);
+
+  std::unique_ptr<BlockRandomAccessDiagonalMatrix> m_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_BLOCK_JACOBI_PRECONDITIONER_H_
diff --git a/internal/ceres/block_jacobi_preconditioner_test.cc b/internal/ceres/block_jacobi_preconditioner_test.cc
new file mode 100644
index 0000000..4a9a871
--- /dev/null
+++ b/internal/ceres/block_jacobi_preconditioner_test.cc
@@ -0,0 +1,105 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/block_jacobi_preconditioner.h"
+
+#include <memory>
+#include <vector>
+#include "ceres/block_random_access_diagonal_matrix.h"
+#include "ceres/linear_least_squares_problems.h"
+#include "ceres/block_sparse_matrix.h"
+#include "gtest/gtest.h"
+#include "Eigen/Dense"
+
+namespace ceres {
+namespace internal {
+
+class BlockJacobiPreconditionerTest : public ::testing::Test {
+ protected:
+  void SetUpFromProblemId(int problem_id) {
+    std::unique_ptr<LinearLeastSquaresProblem> problem(
+        CreateLinearLeastSquaresProblemFromId(problem_id));
+
+    CHECK(problem != nullptr);
+    A.reset(down_cast<BlockSparseMatrix*>(problem->A.release()));
+    D.reset(problem->D.release());
+
+    Matrix dense_a;
+    A->ToDenseMatrix(&dense_a);
+    dense_ata = dense_a.transpose() * dense_a;
+    dense_ata += VectorRef(D.get(), A->num_cols())
+        .array().square().matrix().asDiagonal();
+  }
+
+  void VerifyDiagonalBlocks(const int problem_id) {
+    SetUpFromProblemId(problem_id);
+
+    BlockJacobiPreconditioner pre(*A);
+    pre.Update(*A, D.get());
+    BlockRandomAccessDiagonalMatrix* m =
+        const_cast<BlockRandomAccessDiagonalMatrix*>(&pre.matrix());
+    EXPECT_EQ(m->num_rows(), A->num_cols());
+    EXPECT_EQ(m->num_cols(), A->num_cols());
+
+    const CompressedRowBlockStructure* bs = A->block_structure();
+    for (int i = 0; i < bs->cols.size(); ++i) {
+      const int block_size = bs->cols[i].size;
+      int r, c, row_stride, col_stride;
+      CellInfo* cell_info = m->GetCell(i, i,
+                                       &r, &c,
+                                       &row_stride, &col_stride);
+      // Named to avoid shadowing the matrix pointer 'm' above.
+      MatrixRef cell_block(cell_info->values, row_stride, col_stride);
+      Matrix actual_block_inverse =
+          cell_block.block(r, c, block_size, block_size);
+      Matrix expected_block = dense_ata.block(bs->cols[i].position,
+                                              bs->cols[i].position,
+                                              block_size,
+                                              block_size);
+      const double residual = (actual_block_inverse * expected_block -
+                               Matrix::Identity(block_size, block_size)).norm();
+      EXPECT_NEAR(residual, 0.0, 1e-12) << "Block: " << i;
+    }
+  }
+
+  std::unique_ptr<BlockSparseMatrix> A;
+  std::unique_ptr<double[]> D;
+  Matrix dense_ata;
+};
+
+TEST_F(BlockJacobiPreconditionerTest, SmallProblem) {
+  VerifyDiagonalBlocks(2);
+}
+
+TEST_F(BlockJacobiPreconditionerTest, LargeProblem) {
+  VerifyDiagonalBlocks(3);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/block_jacobian_writer.cc b/internal/ceres/block_jacobian_writer.cc
new file mode 100644
index 0000000..6998bd6
--- /dev/null
+++ b/internal/ceres/block_jacobian_writer.cc
@@ -0,0 +1,213 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include "ceres/block_jacobian_writer.h"
+
+#include "ceres/block_evaluate_preparer.h"
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/parameter_block.h"
+#include "ceres/program.h"
+#include "ceres/residual_block.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/port.h"
+
+namespace ceres {
+namespace internal {
+
+using std::vector;
+
+namespace {
+
+// Given the residual block ordering, build a lookup table to determine which
+// per-parameter jacobian goes where in the overall program jacobian.
+//
+// Since we expect to use a Schur type linear solver to solve the LM step, take
+// extra care to place the E blocks and the F blocks contiguously. E blocks are
+// the first num_eliminate_blocks parameter blocks as indicated by the parameter
+// block ordering. The remaining parameter blocks are the F blocks.
+//
+// TODO(keir): Consider if we should use a boolean for each parameter block
+// instead of num_eliminate_blocks.
+void BuildJacobianLayout(const Program& program,
+                         int num_eliminate_blocks,
+                         vector<int*>* jacobian_layout,
+                         vector<int>* jacobian_layout_storage) {
+  const vector<ResidualBlock*>& residual_blocks = program.residual_blocks();
+
+  // Iterate over all the active residual blocks and compute the total size of
+  // the E blocks; this determines where the F blocks start in the jacobian
+  // matrix. Also count the total number of jacobian blocks.
+  int f_block_pos = 0;
+  int num_jacobian_blocks = 0;
+  for (int i = 0; i < residual_blocks.size(); ++i) {
+    ResidualBlock* residual_block = residual_blocks[i];
+    const int num_residuals = residual_block->NumResiduals();
+    const int num_parameter_blocks = residual_block->NumParameterBlocks();
+
+    // Advance f_block_pos over each E block for this residual.
+    for (int j = 0; j < num_parameter_blocks; ++j) {
+      ParameterBlock* parameter_block = residual_block->parameter_blocks()[j];
+      if (!parameter_block->IsConstant()) {
+        // Only count blocks for active parameters.
+        num_jacobian_blocks++;
+        if (parameter_block->index() < num_eliminate_blocks) {
+          f_block_pos += num_residuals * parameter_block->LocalSize();
+        }
+      }
+    }
+  }
+
+  // We now know that the E blocks are laid out starting at zero, and the F
+  // blocks are laid out starting at f_block_pos. Iterate over the residual
+  // blocks again, and this time fill the jacobian_layout array with the
+  // position information.
+
+  jacobian_layout->resize(program.NumResidualBlocks());
+  jacobian_layout_storage->resize(num_jacobian_blocks);
+
+  int e_block_pos = 0;
+  int* jacobian_pos = &(*jacobian_layout_storage)[0];
+  for (int i = 0; i < residual_blocks.size(); ++i) {
+    const ResidualBlock* residual_block = residual_blocks[i];
+    const int num_residuals = residual_block->NumResiduals();
+    const int num_parameter_blocks = residual_block->NumParameterBlocks();
+
+    (*jacobian_layout)[i] = jacobian_pos;
+    for (int j = 0; j < num_parameter_blocks; ++j) {
+      ParameterBlock* parameter_block = residual_block->parameter_blocks()[j];
+      const int parameter_block_index = parameter_block->index();
+      if (parameter_block->IsConstant()) {
+        continue;
+      }
+      const int jacobian_block_size =
+          num_residuals * parameter_block->LocalSize();
+      if (parameter_block_index < num_eliminate_blocks) {
+        *jacobian_pos = e_block_pos;
+        e_block_pos += jacobian_block_size;
+      } else {
+        *jacobian_pos = f_block_pos;
+        f_block_pos += jacobian_block_size;
+      }
+      jacobian_pos++;
+    }
+  }
+}
+
+}  // namespace
+
+BlockJacobianWriter::BlockJacobianWriter(const Evaluator::Options& options,
+                                         Program* program)
+    : program_(program) {
+  CHECK_GE(options.num_eliminate_blocks, 0)
+      << "num_eliminate_blocks must be greater than 0.";
+
+  BuildJacobianLayout(*program,
+                      options.num_eliminate_blocks,
+                      &jacobian_layout_,
+                      &jacobian_layout_storage_);
+}
+
+// Create evaluate preparers that point directly into the final jacobian. This
+// makes the final Write() a nop.
+BlockEvaluatePreparer* BlockJacobianWriter::CreateEvaluatePreparers(
+    int num_threads) {
+  int max_derivatives_per_residual_block =
+      program_->MaxDerivativesPerResidualBlock();
+
+  BlockEvaluatePreparer* preparers = new BlockEvaluatePreparer[num_threads];
+  for (int i = 0; i < num_threads; i++) {
+    preparers[i].Init(&jacobian_layout_[0], max_derivatives_per_residual_block);
+  }
+  return preparers;
+}
+
+SparseMatrix* BlockJacobianWriter::CreateJacobian() const {
+  CompressedRowBlockStructure* bs = new CompressedRowBlockStructure;
+
+  const vector<ParameterBlock*>& parameter_blocks =
+      program_->parameter_blocks();
+
+  // Construct the column blocks.
+  bs->cols.resize(parameter_blocks.size());
+  for (int i = 0, cursor = 0; i < parameter_blocks.size(); ++i) {
+    CHECK_NE(parameter_blocks[i]->index(), -1);
+    CHECK(!parameter_blocks[i]->IsConstant());
+    bs->cols[i].size = parameter_blocks[i]->LocalSize();
+    bs->cols[i].position = cursor;
+    cursor += bs->cols[i].size;
+  }
+
+  // Construct the cells in each row.
+  const vector<ResidualBlock*>& residual_blocks = program_->residual_blocks();
+  int row_block_position = 0;
+  bs->rows.resize(residual_blocks.size());
+  for (int i = 0; i < residual_blocks.size(); ++i) {
+    const ResidualBlock* residual_block = residual_blocks[i];
+    CompressedRow* row = &bs->rows[i];
+
+    row->block.size = residual_block->NumResiduals();
+    row->block.position = row_block_position;
+    row_block_position += row->block.size;
+
+    // Size the row by the number of active parameters in this residual.
+    const int num_parameter_blocks = residual_block->NumParameterBlocks();
+    int num_active_parameter_blocks = 0;
+    for (int j = 0; j < num_parameter_blocks; ++j) {
+      if (residual_block->parameter_blocks()[j]->index() != -1) {
+        num_active_parameter_blocks++;
+      }
+    }
+    row->cells.resize(num_active_parameter_blocks);
+
+    // Add layout information for the active parameters in this row.
+    for (int j = 0, k = 0; j < num_parameter_blocks; ++j) {
+      const ParameterBlock* parameter_block =
+          residual_block->parameter_blocks()[j];
+      if (!parameter_block->IsConstant()) {
+        Cell& cell = row->cells[k];
+        cell.block_id = parameter_block->index();
+        cell.position = jacobian_layout_[i][k];
+
+        // Only increment k for active parameters, since there is only layout
+        // information for active parameters.
+        k++;
+      }
+    }
+
+    sort(row->cells.begin(), row->cells.end(), CellLessThan);
+  }
+
+  BlockSparseMatrix* jacobian = new BlockSparseMatrix(bs);
+  CHECK(jacobian != nullptr);
+  return jacobian;
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/block_jacobian_writer.h b/internal/ceres/block_jacobian_writer.h
new file mode 100644
index 0000000..c94a0d3
--- /dev/null
+++ b/internal/ceres/block_jacobian_writer.h
@@ -0,0 +1,128 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// A jacobian writer that writes to block sparse matrices. The "writer" name is
+// misleading, since the Write() operation on the block jacobian writer does not
+// write anything. Instead, the Prepare() method on the BlockEvaluatePreparers
+// makes a jacobians array which has direct pointers into the block sparse
+// jacobian. When the cost function is evaluated, the jacobian blocks get placed
+// directly in their final location.
+
+#ifndef CERES_INTERNAL_BLOCK_JACOBIAN_WRITER_H_
+#define CERES_INTERNAL_BLOCK_JACOBIAN_WRITER_H_
+
+#include <vector>
+#include "ceres/evaluator.h"
+#include "ceres/internal/port.h"
+
+namespace ceres {
+namespace internal {
+
+class BlockEvaluatePreparer;
+class Program;
+class SparseMatrix;
+
+// TODO(sameeragarwal): This class needs documentation.
+class BlockJacobianWriter {
+ public:
+  BlockJacobianWriter(const Evaluator::Options& options,
+                      Program* program);
+
+  // JacobianWriter interface.
+
+  // Create evaluate preparers that point directly into the final jacobian.
+  // This makes the final Write() a nop.
+  BlockEvaluatePreparer* CreateEvaluatePreparers(int num_threads);
+
+  SparseMatrix* CreateJacobian() const;
+
+  void Write(int /* residual_id */,
+             int /* residual_offset */,
+             double** /* jacobians */,
+             SparseMatrix* /* jacobian */) {
+    // This is a noop since the blocks were written directly into their final
+    // position by the outside evaluate call, thanks to the jacobians array
+    // prepared by the BlockEvaluatePreparers.
+  }
+
+ private:
+  Program* program_;
+
+  // Stores the position of each residual / parameter jacobian.
+  //
+  // The block sparse matrix that this writer writes to is stored as a set of
+  // contiguous dense blocks, one after another; see BlockSparseMatrix. The
+  // "double* values_" member of the block sparse matrix contains all of these
+  // blocks. Given a pointer to the first element of a block and the size of
+  // that block, it's possible to write to it.
+  //
+  // In the case of a block sparse jacobian, the jacobian writer needs a way to
+  // find the offset in the values_ array of each residual/parameter jacobian
+  // block.
+  //
+  // That is the purpose of jacobian_layout_.
+  //
+  // In particular, jacobian_layout_[i][j] is the offset in the values_ array of
+  // the derivative of residual block i with respect to the parameter block at
+  // active argument position j.
+  //
+  // The active qualifier means that non-active parameters do not count. Care
+  // must be taken when indexing into jacobian_layout_ to account for this.
+  // Consider a single residual example:
+  //
+  //   r(x, y, z)
+  //
+  // with r in R^3, x in R^4, y in R^2, and z in R^5.
+  // Take y as a constant (non-active) parameter.
+  // Take r as residual number 0.
+  //
+  // In this case, the active arguments are only (x, z), so the active argument
+  // position for x is 0, and the active argument position for z is 1. This is
+  // similar to thinking of r as taking only 2 parameters:
+  //
+  //   r(x, z)
+  //
+  // There are only 2 jacobian blocks: dr/dx and dr/dz. jacobian_layout_ would
+  // have the following contents:
+  //
+  //   jacobian_layout_[0] = { 0, 12 }
+  //
+  // which indicates that dr/dx is located at values_[0], and dr/dz is at
+  // values_[12]. See BlockEvaluatePreparer::Prepare()'s comments about 'j'.
+  std::vector<int*> jacobian_layout_;
+
+  // The pointers in jacobian_layout_ point directly into this vector.
+  std::vector<int> jacobian_layout_storage_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_BLOCK_JACOBIAN_WRITER_H_
diff --git a/internal/ceres/block_random_access_dense_matrix.cc b/internal/ceres/block_random_access_dense_matrix.cc
new file mode 100644
index 0000000..f567aa5
--- /dev/null
+++ b/internal/ceres/block_random_access_dense_matrix.cc
@@ -0,0 +1,87 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/block_random_access_dense_matrix.h"
+
+#include <vector>
+#include "ceres/internal/eigen.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+BlockRandomAccessDenseMatrix::BlockRandomAccessDenseMatrix(
+    const std::vector<int>& blocks) {
+  const int num_blocks = blocks.size();
+  block_layout_.resize(num_blocks, 0);
+  num_rows_ = 0;
+  for (int i = 0; i < num_blocks; ++i) {
+    block_layout_[i] = num_rows_;
+    num_rows_ += blocks[i];
+  }
+
+  values_.reset(new double[num_rows_ * num_rows_]);
+
+  cell_infos_.reset(new CellInfo[num_blocks * num_blocks]);
+  for (int i = 0; i < num_blocks * num_blocks; ++i) {
+    cell_infos_[i].values = values_.get();
+  }
+
+  SetZero();
+}
+
+// Assume that the user does not hold any locks on any cell blocks
+// while the matrix is being destroyed.
+BlockRandomAccessDenseMatrix::~BlockRandomAccessDenseMatrix() {
+}
+
+CellInfo* BlockRandomAccessDenseMatrix::GetCell(const int row_block_id,
+                                                const int col_block_id,
+                                                int* row,
+                                                int* col,
+                                                int* row_stride,
+                                                int* col_stride) {
+  *row = block_layout_[row_block_id];
+  *col = block_layout_[col_block_id];
+  *row_stride = num_rows_;
+  *col_stride = num_rows_;
+  return &cell_infos_[row_block_id * block_layout_.size() + col_block_id];
+}
+
+// Assume that the user does not hold any locks on any cell blocks
+// when they are calling SetZero.
+void BlockRandomAccessDenseMatrix::SetZero() {
+  if (num_rows_) {
+    VectorRef(values_.get(), num_rows_ * num_rows_).setZero();
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/block_random_access_dense_matrix.h b/internal/ceres/block_random_access_dense_matrix.h
new file mode 100644
index 0000000..e1dca84
--- /dev/null
+++ b/internal/ceres/block_random_access_dense_matrix.h
@@ -0,0 +1,97 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_BLOCK_RANDOM_ACCESS_DENSE_MATRIX_H_
+#define CERES_INTERNAL_BLOCK_RANDOM_ACCESS_DENSE_MATRIX_H_
+
+#include "ceres/block_random_access_matrix.h"
+
+#include <memory>
+#include <vector>
+
+#include "ceres/internal/port.h"
+
+namespace ceres {
+namespace internal {
+
+// A square block random access matrix with the same row and column
+// block structure. All cells are stored in a single array, so the
+// matrix is also accessible as a dense matrix of size
+// num_rows x num_cols.
+//
+// This class is NOT thread safe. Since all n^2 cells are stored,
+// GetCell never returns NULL for any (row_block_id, col_block_id)
+// pair.
+//
+// ReturnCell is a nop.
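+//
+// A minimal usage sketch (illustrative only; the block sizes are arbitrary):
+//
+//   std::vector<int> blocks = {3, 4};   // A 7x7 matrix with 2x2 = 4 cells.
+//   BlockRandomAccessDenseMatrix m(blocks);
+//   int row, col, row_stride, col_stride;
+//   CellInfo* cell = m.GetCell(0, 1, &row, &col, &row_stride, &col_stride);
+//   // The (0, 1) cell is the 3x4 block starting at (row, col) = (0, 3) of
+//   // the dense matrix; it can be addressed as
+//   //   MatrixRef(cell->values, row_stride, col_stride).block(row, col, 3, 4)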
+class BlockRandomAccessDenseMatrix : public BlockRandomAccessMatrix {
+ public:
+  // blocks is a vector of block sizes. The resulting matrix has
+  // blocks.size() * blocks.size() cells.
+  explicit BlockRandomAccessDenseMatrix(const std::vector<int>& blocks);
+  BlockRandomAccessDenseMatrix(const BlockRandomAccessDenseMatrix&) = delete;
+  void operator=(const BlockRandomAccessDenseMatrix&) = delete;
+
+  // The destructor is not thread safe. It assumes that no one is
+  // modifying any cells when the matrix is being destroyed.
+  virtual ~BlockRandomAccessDenseMatrix();
+
+  // BlockRandomAccessMatrix interface.
+  virtual CellInfo* GetCell(int row_block_id,
+                            int col_block_id,
+                            int* row,
+                            int* col,
+                            int* row_stride,
+                            int* col_stride);
+
+  // This is not a thread safe method, it assumes that no cell is
+  // locked.
+  virtual void SetZero();
+
+  // Since the matrix is square with the same row and column block
+  // structure, num_rows() = num_cols().
+  virtual int num_rows() const { return num_rows_; }
+  virtual int num_cols() const { return num_rows_; }
+
+  // The underlying matrix storing the cells.
+  const double* values() const { return values_.get(); }
+  double* mutable_values() { return values_.get(); }
+
+ private:
+  int num_rows_;
+  std::vector<int> block_layout_;
+  std::unique_ptr<double[]> values_;
+  std::unique_ptr<CellInfo[]> cell_infos_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_BLOCK_RANDOM_ACCESS_DENSE_MATRIX_H_
diff --git a/internal/ceres/block_random_access_dense_matrix_test.cc b/internal/ceres/block_random_access_dense_matrix_test.cc
new file mode 100644
index 0000000..8a5ba59
--- /dev/null
+++ b/internal/ceres/block_random_access_dense_matrix_test.cc
@@ -0,0 +1,115 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <vector>
+#include "gtest/gtest.h"
+#include "ceres/block_random_access_dense_matrix.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+TEST(BlockRandomAccessDenseMatrix, GetCell) {
+  std::vector<int> blocks;
+  blocks.push_back(3);
+  blocks.push_back(4);
+  blocks.push_back(5);
+  const int num_rows = 3 + 4 + 5;
+  BlockRandomAccessDenseMatrix m(blocks);
+  EXPECT_EQ(m.num_rows(), num_rows);
+  EXPECT_EQ(m.num_cols(), num_rows);
+
+  int row_idx = 0;
+  for (int i = 0; i < blocks.size(); ++i) {
+    int col_idx = 0;
+    for (int j = 0; j < blocks.size(); ++j) {
+      int row;
+      int col;
+      int row_stride;
+      int col_stride;
+      CellInfo* cell =
+          m.GetCell(i, j, &row, &col, &row_stride, &col_stride);
+
+      EXPECT_TRUE(cell != NULL);
+      EXPECT_EQ(row, row_idx);
+      EXPECT_EQ(col, col_idx);
+      EXPECT_EQ(row_stride, 3 + 4 + 5);
+      EXPECT_EQ(col_stride, 3 + 4 + 5);
+      col_idx += blocks[j];
+    }
+    row_idx += blocks[i];
+  }
+}
+
+TEST(BlockRandomAccessDenseMatrix, WriteCell) {
+  std::vector<int> blocks;
+  blocks.push_back(3);
+  blocks.push_back(4);
+  blocks.push_back(5);
+  const int num_rows = 3 + 4 + 5;
+
+  BlockRandomAccessDenseMatrix m(blocks);
+
+  // Fill the cell (i,j) with (i + 1) * (j + 1)
+  for (int i = 0; i < blocks.size(); ++i) {
+    for (int j = 0; j < blocks.size(); ++j) {
+      int row;
+      int col;
+      int row_stride;
+      int col_stride;
+      CellInfo* cell = m.GetCell(
+          i, j, &row, &col, &row_stride, &col_stride);
+      MatrixRef(cell->values, row_stride, col_stride).block(
+          row, col, blocks[i], blocks[j]) =
+          (i+1) * (j+1) * Matrix::Ones(blocks[i], blocks[j]);
+    }
+  }
+
+  // Check the values in the array are correct by going over the
+  // entries of each block manually.
+  int row_idx = 0;
+  for (int i = 0; i < blocks.size(); ++i) {
+    int col_idx = 0;
+    for (int j = 0; j < blocks.size(); ++j) {
+      // Check the values of this block.
+      for (int r = 0; r < blocks[i]; ++r) {
+        for (int c = 0; c < blocks[j]; ++c) {
+          int pos = (row_idx + r) * num_rows + (col_idx + c);
+          EXPECT_EQ(m.values()[pos], (i + 1) * (j + 1));
+        }
+      }
+      col_idx += blocks[j];
+    }
+    row_idx += blocks[i];
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/block_random_access_diagonal_matrix.cc b/internal/ceres/block_random_access_diagonal_matrix.cc
new file mode 100644
index 0000000..526d173
--- /dev/null
+++ b/internal/ceres/block_random_access_diagonal_matrix.cc
@@ -0,0 +1,154 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/block_random_access_diagonal_matrix.h"
+
+#include <algorithm>
+#include <set>
+#include <utility>
+#include <vector>
+
+#include "Eigen/Dense"
+#include "ceres/internal/port.h"
+#include "ceres/stl_util.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+using std::vector;
+
+// TODO(sameeragarwal): Drop the dependence on TripletSparseMatrix.
+
+BlockRandomAccessDiagonalMatrix::BlockRandomAccessDiagonalMatrix(
+    const vector<int>& blocks)
+    : blocks_(blocks) {
+  // Build the row/column layout vector and count the number of scalar
+  // rows/columns.
+  int num_cols = 0;
+  int num_nonzeros = 0;
+  vector<int> block_positions;
+  for (int i = 0; i < blocks_.size(); ++i) {
+    block_positions.push_back(num_cols);
+    num_cols += blocks_[i];
+    num_nonzeros += blocks_[i] * blocks_[i];
+  }
+
+  VLOG(1) << "Matrix Size [" << num_cols
+          << "," << num_cols
+          << "] " << num_nonzeros;
+
+  tsm_.reset(new TripletSparseMatrix(num_cols, num_cols, num_nonzeros));
+  tsm_->set_num_nonzeros(num_nonzeros);
+  int* rows = tsm_->mutable_rows();
+  int* cols = tsm_->mutable_cols();
+  double* values = tsm_->mutable_values();
+
+  int pos = 0;
+  for (int i = 0; i < blocks_.size(); ++i) {
+    const int block_size = blocks_[i];
+    layout_.push_back(new CellInfo(values + pos));
+    const int block_begin = block_positions[i];
+    for (int r = 0; r < block_size; ++r) {
+      for (int c = 0; c < block_size; ++c, ++pos) {
+        rows[pos] = block_begin + r;
+        cols[pos] = block_begin + c;
+      }
+    }
+  }
+}
+
+// Assume that the user does not hold any locks on any cell blocks
+// while the matrix is being destroyed.
+BlockRandomAccessDiagonalMatrix::~BlockRandomAccessDiagonalMatrix() {
+  STLDeleteContainerPointers(layout_.begin(), layout_.end());
+}
+
+CellInfo* BlockRandomAccessDiagonalMatrix::GetCell(int row_block_id,
+                                                   int col_block_id,
+                                                   int* row,
+                                                   int* col,
+                                                   int* row_stride,
+                                                   int* col_stride) {
+  if (row_block_id != col_block_id) {
+    return NULL;
+  }
+  const int stride = blocks_[row_block_id];
+
+  // Each cell is stored contiguously as its own little dense matrix.
+  *row = 0;
+  *col = 0;
+  *row_stride = stride;
+  *col_stride = stride;
+  return layout_[row_block_id];
+}
+
+// Assume that the user does not hold any locks on any cell blocks
+// when they are calling SetZero.
+void BlockRandomAccessDiagonalMatrix::SetZero() {
+  if (tsm_->num_nonzeros()) {
+    VectorRef(tsm_->mutable_values(),
+              tsm_->num_nonzeros()).setZero();
+  }
+}
+
+void BlockRandomAccessDiagonalMatrix::Invert() {
+  double* values = tsm_->mutable_values();
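+  // Replace each diagonal block in place by its inverse via a Cholesky (LLT)
+  // factorization. This assumes every block is positive definite.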
+  for (int i = 0; i < blocks_.size(); ++i) {
+    const int block_size = blocks_[i];
+    MatrixRef block(values, block_size, block_size);
+    block =
+        block
+        .selfadjointView<Eigen::Upper>()
+        .llt()
+        .solve(Matrix::Identity(block_size, block_size));
+    values += block_size * block_size;
+  }
+}
+
+void BlockRandomAccessDiagonalMatrix::RightMultiply(const double* x,
+                                                    double* y) const {
+  CHECK(x != nullptr);
+  CHECK(y != nullptr);
+  const double* values = tsm_->values();
+  for (int i = 0; i < blocks_.size(); ++i) {
+    const int block_size = blocks_[i];
+    ConstMatrixRef block(values, block_size, block_size);
+    VectorRef(y, block_size).noalias() += block * ConstVectorRef(x, block_size);
+    x += block_size;
+    y += block_size;
+    values += block_size * block_size;
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/block_random_access_diagonal_matrix.h b/internal/ceres/block_random_access_diagonal_matrix.h
new file mode 100644
index 0000000..6ad976f
--- /dev/null
+++ b/internal/ceres/block_random_access_diagonal_matrix.h
@@ -0,0 +1,99 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_BLOCK_RANDOM_ACCESS_DIAGONAL_MATRIX_H_
+#define CERES_INTERNAL_BLOCK_RANDOM_ACCESS_DIAGONAL_MATRIX_H_
+
+#include <memory>
+#include <set>
+#include <utility>
+#include <vector>
+
+#include "ceres/block_random_access_matrix.h"
+#include "ceres/internal/port.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+// A thread safe block diagonal matrix implementation of
+// BlockRandomAccessMatrix.
+class BlockRandomAccessDiagonalMatrix : public BlockRandomAccessMatrix {
+ public:
+  // blocks is an array of block sizes.
+  explicit BlockRandomAccessDiagonalMatrix(const std::vector<int>& blocks);
+  BlockRandomAccessDiagonalMatrix(const BlockRandomAccessDiagonalMatrix&) = delete;
+  void operator=(const BlockRandomAccessDiagonalMatrix&) = delete;
+
+  // The destructor is not thread safe. It assumes that no one is
+  // modifying any cells when the matrix is being destroyed.
+  virtual ~BlockRandomAccessDiagonalMatrix();
+
+  // BlockRandomAccessMatrix Interface.
+  virtual CellInfo* GetCell(int row_block_id,
+                            int col_block_id,
+                            int* row,
+                            int* col,
+                            int* row_stride,
+                            int* col_stride);
+
+  // This method is not thread safe; it assumes that no cell is
+  // locked.
+  virtual void SetZero();
+
+  // Invert the matrix assuming that each block is positive definite.
+  void Invert();
+
+  // y += S * x
+  void RightMultiply(const double* x, double* y) const;
+
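+  // A minimal usage sketch (hypothetical block sizes; x and y are
+  // double arrays of size num_rows(), and the diagonal blocks must be
+  // filled in and positive definite before calling Invert):
+  //
+  //   std::vector<int> blocks;
+  //   blocks.push_back(2);
+  //   blocks.push_back(3);
+  //   BlockRandomAccessDiagonalMatrix S(blocks);
+  //   // ... write the diagonal blocks via GetCell() ...
+  //   S.Invert();                   // Each block becomes its inverse.
+  //   S.RightMultiply(x, y);        // y += inverse(S) * x.
+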
+  // Since the matrix is square, num_rows() == num_cols().
+  virtual int num_rows() const { return tsm_->num_rows(); }
+  virtual int num_cols() const { return tsm_->num_cols(); }
+
+  const TripletSparseMatrix* matrix() const { return tsm_.get(); }
+  TripletSparseMatrix* mutable_matrix() { return tsm_.get(); }
+
+ private:
+  // row/column block sizes.
+  const std::vector<int> blocks_;
+  std::vector<CellInfo*> layout_;
+
+  // The underlying matrix object which actually stores the cells.
+  std::unique_ptr<TripletSparseMatrix> tsm_;
+
+  friend class BlockRandomAccessDiagonalMatrixTest;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_BLOCK_RANDOM_ACCESS_DIAGONAL_MATRIX_H_
diff --git a/internal/ceres/block_random_access_diagonal_matrix_test.cc b/internal/ceres/block_random_access_diagonal_matrix_test.cc
new file mode 100644
index 0000000..a54595c
--- /dev/null
+++ b/internal/ceres/block_random_access_diagonal_matrix_test.cc
@@ -0,0 +1,161 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <limits>
+#include <memory>
+#include <vector>
+
+#include "ceres/block_random_access_diagonal_matrix.h"
+#include "ceres/internal/eigen.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+#include "Eigen/Cholesky"
+
+namespace ceres {
+namespace internal {
+
+class BlockRandomAccessDiagonalMatrixTest : public ::testing::Test {
+ public:
+  void SetUp() {
+    std::vector<int> blocks;
+    blocks.push_back(3);
+    blocks.push_back(4);
+    blocks.push_back(5);
+    const int num_rows = 3 + 4 + 5;
+    num_nonzeros_ =  3 * 3 + 4 * 4 + 5 * 5;
+
+    m_.reset(new BlockRandomAccessDiagonalMatrix(blocks));
+
+    EXPECT_EQ(m_->num_rows(), num_rows);
+    EXPECT_EQ(m_->num_cols(), num_rows);
+
+    for (int i = 0; i < blocks.size(); ++i) {
+      const int row_block_id = i;
+      int col_block_id;
+      int row;
+      int col;
+      int row_stride;
+      int col_stride;
+
+      for (int j = 0; j < blocks.size(); ++j) {
+        col_block_id = j;
+        CellInfo* cell =  m_->GetCell(row_block_id, col_block_id,
+                                    &row, &col,
+                                    &row_stride, &col_stride);
+        // Off diagonal entries are not present.
+        if (i != j) {
+          EXPECT_TRUE(cell == NULL);
+          continue;
+        }
+
+        EXPECT_TRUE(cell != NULL);
+        EXPECT_EQ(row, 0);
+        EXPECT_EQ(col, 0);
+        EXPECT_EQ(row_stride, blocks[row_block_id]);
+        EXPECT_EQ(col_stride, blocks[col_block_id]);
+
+        // Write into the block
+        MatrixRef(cell->values, row_stride, col_stride).block(
+            row, col, blocks[row_block_id], blocks[col_block_id]) =
+            (row_block_id + 1) * (col_block_id +1) *
+            Matrix::Ones(blocks[row_block_id], blocks[col_block_id])
+            + Matrix::Identity(blocks[row_block_id], blocks[row_block_id]);
+      }
+    }
+  }
+
+ protected:
+  int num_nonzeros_;
+  std::unique_ptr<BlockRandomAccessDiagonalMatrix> m_;
+};
+
+TEST_F(BlockRandomAccessDiagonalMatrixTest, MatrixContents) {
+  const TripletSparseMatrix* tsm = m_->matrix();
+  EXPECT_EQ(tsm->num_nonzeros(), num_nonzeros_);
+  EXPECT_EQ(tsm->max_num_nonzeros(), num_nonzeros_);
+
+  Matrix dense;
+  tsm->ToDenseMatrix(&dense);
+
+  double kTolerance = 1e-14;
+
+  // (0,0)
+  EXPECT_NEAR((dense.block(0, 0, 3, 3) -
+               (Matrix::Ones(3, 3) + Matrix::Identity(3, 3))).norm(),
+              0.0,
+              kTolerance);
+
+  // (1,1)
+  EXPECT_NEAR((dense.block(3, 3, 4, 4) -
+               (2 * 2 * Matrix::Ones(4, 4) + Matrix::Identity(4, 4))).norm(),
+              0.0,
+              kTolerance);
+
+  // (2,2)
+  EXPECT_NEAR((dense.block(7, 7, 5, 5) -
+               (3 * 3 * Matrix::Ones(5, 5) + Matrix::Identity(5, 5))).norm(),
+              0.0,
+              kTolerance);
+
+  // There is nothing else in the matrix besides these three blocks.
+  EXPECT_NEAR(dense.norm(),
+              sqrt(6 * 1.0 + 3 * 4.0 +
+                   12 * 16.0 + 4 * 25.0 +
+                   20 * 81.0 + 5 * 100.0), kTolerance);
+}
+
+TEST_F(BlockRandomAccessDiagonalMatrixTest, RightMultiply) {
+  double kTolerance = 1e-14;
+  const TripletSparseMatrix* tsm = m_->matrix();
+  Matrix dense;
+  tsm->ToDenseMatrix(&dense);
+  Vector x = Vector::Random(dense.rows());
+  Vector expected_y = dense * x;
+  Vector actual_y = Vector::Zero(dense.rows());
+  m_->RightMultiply(x.data(),  actual_y.data());
+  EXPECT_NEAR((expected_y - actual_y).norm(), 0, kTolerance);
+}
+
+TEST_F(BlockRandomAccessDiagonalMatrixTest, Invert) {
+  double kTolerance = 1e-14;
+  const TripletSparseMatrix* tsm = m_->matrix();
+  Matrix dense;
+  tsm->ToDenseMatrix(&dense);
+  Matrix expected_inverse =
+      dense.llt().solve(Matrix::Identity(dense.rows(), dense.rows()));
+
+  m_->Invert();
+  tsm->ToDenseMatrix(&dense);
+
+  EXPECT_NEAR((expected_inverse - dense).norm(), 0.0, kTolerance);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/block_random_access_matrix.cc b/internal/ceres/block_random_access_matrix.cc
new file mode 100644
index 0000000..347d765
--- /dev/null
+++ b/internal/ceres/block_random_access_matrix.cc
@@ -0,0 +1,40 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/block_random_access_matrix.h"
+
+namespace ceres {
+namespace internal {
+
+BlockRandomAccessMatrix::~BlockRandomAccessMatrix() {
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/block_random_access_matrix.h b/internal/ceres/block_random_access_matrix.h
new file mode 100644
index 0000000..2187fcd
--- /dev/null
+++ b/internal/ceres/block_random_access_matrix.h
@@ -0,0 +1,132 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Interface for matrices that allow block based random access.
+
+#ifndef CERES_INTERNAL_BLOCK_RANDOM_ACCESS_MATRIX_H_
+#define CERES_INTERNAL_BLOCK_RANDOM_ACCESS_MATRIX_H_
+
+#include <mutex>
+
+namespace ceres {
+namespace internal {
+
+// A matrix implementing the BlockRandomAccessMatrix interface is a
+// matrix whose rows and columns are divided into blocks. For example
+// the matrix A:
+//
+//            3     4      5
+//  A =  5 [c_11  c_12  c_13]
+//       4 [c_21  c_22  c_23]
+//
+// has row blocks of size 5 and 4, and column blocks of size 3, 4 and
+// 5. It has six cells corresponding to the six row-column block
+// combinations.
+//
+// BlockRandomAccessMatrix objects provide access to cells c_ij using
+// the GetCell method. When a cell is present, GetCell will return a
+// CellInfo object containing a pointer to an array which contains the
+// cell as a submatrix and a mutex that guards this submatrix. If the
+// user is accessing the matrix concurrently, it is his responsibility
+// to use the mutex to exclude other writers from writing to the cell
+// concurrently.
+//
+// There is no requirement that all cells be present, i.e. the matrix
+// itself can be block sparse. When a cell is not present, the GetCell
+// method will return a NULL pointer.
+//
+// There is no requirement about how the cells are stored beyond that
+// they form a dense submatrix of a larger dense matrix. Like everywhere
+// else in Ceres, RowMajor storage is assumed.
+//
+// Example usage:
+//
+//  BlockRandomAccessMatrix* A = new BlockRandomAccessMatrixSubClass(...)
+//
+//  int row, col, row_stride, col_stride;
+//  CellInfo* cell = A->GetCell(row_block_id, col_block_id,
+//                              &row, &col,
+//                              &row_stride, &col_stride);
+//
+//  if (cell != NULL) {
+//     MatrixRef m(cell->values, row_stride, col_stride);
+//     std::lock_guard<std::mutex> l(cell->m);
+//     m.block(row, col, row_block_size, col_block_size) = ...
+//  }
+
+// Structure to carry a pointer to the array containing a cell and the
+// mutex guarding it.
+struct CellInfo {
+  CellInfo()
+      : values(NULL) {
+  }
+
+  explicit CellInfo(double* ptr)
+      : values(ptr) {
+  }
+
+  double* values;
+  std::mutex m;
+};
+
+class BlockRandomAccessMatrix {
+ public:
+  virtual ~BlockRandomAccessMatrix();
+
+  // If the cell (row_block_id, col_block_id) is present, then return
+  // a CellInfo with a pointer to the dense matrix containing it,
+  // otherwise return NULL. The dense matrix containing this cell has
+  // size row_stride, col_stride and the cell is located at position
+  // (row, col) within this matrix.
+  //
+  // The size of the cell is row_block_size x col_block_size and is
+  // assumed to be known to the caller. row_block_size is less than or
+  // equal to row_stride and col_block_size is less than or equal to
+  // col_stride.
+  virtual CellInfo* GetCell(int row_block_id,
+                            int col_block_id,
+                            int* row,
+                            int* col,
+                            int* row_stride,
+                            int* col_stride) = 0;
+
+  // Zero out the values of the array. The structure of the matrix
+  // (size and sparsity) is preserved.
+  virtual void SetZero() = 0;
+
+  // Number of scalar rows and columns in the matrix, i.e the sum of
+  // all row blocks and column block sizes respectively.
+  virtual int num_rows() const = 0;
+  virtual int num_cols() const = 0;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_BLOCK_RANDOM_ACCESS_MATRIX_H_
diff --git a/internal/ceres/block_random_access_sparse_matrix.cc b/internal/ceres/block_random_access_sparse_matrix.cc
new file mode 100644
index 0000000..9c16454
--- /dev/null
+++ b/internal/ceres/block_random_access_sparse_matrix.cc
@@ -0,0 +1,185 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/block_random_access_sparse_matrix.h"
+
+#include <algorithm>
+#include <memory>
+#include <set>
+#include <utility>
+#include <vector>
+
+#include "ceres/internal/port.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+using std::make_pair;
+using std::pair;
+using std::set;
+using std::vector;
+
+BlockRandomAccessSparseMatrix::BlockRandomAccessSparseMatrix(
+    const vector<int>& blocks,
+    const set<pair<int, int>>& block_pairs)
+    : kMaxRowBlocks(10 * 1000 * 1000),
+      blocks_(blocks) {
+  CHECK_LT(blocks.size(), kMaxRowBlocks);
+
+  // Build the row/column layout vector and count the number of scalar
+  // rows/columns.
+  int num_cols = 0;
+  block_positions_.reserve(blocks_.size());
+  for (int i = 0; i < blocks_.size(); ++i) {
+    block_positions_.push_back(num_cols);
+    num_cols += blocks_[i];
+  }
+
+  // Count the number of scalar non-zero entries and build the layout
+  // object for looking into the values array of the
+  // TripletSparseMatrix.
+  int num_nonzeros = 0;
+  for (const auto& block_pair : block_pairs) {
+    const int row_block_size = blocks_[block_pair.first];
+    const int col_block_size = blocks_[block_pair.second];
+    num_nonzeros += row_block_size * col_block_size;
+  }
+
+  VLOG(1) << "Matrix Size [" << num_cols
+          << "," << num_cols
+          << "] " << num_nonzeros;
+
+  tsm_.reset(new TripletSparseMatrix(num_cols, num_cols, num_nonzeros));
+  tsm_->set_num_nonzeros(num_nonzeros);
+  int* rows = tsm_->mutable_rows();
+  int* cols = tsm_->mutable_cols();
+  double* values = tsm_->mutable_values();
+
+  int pos = 0;
+  for (const auto& block_pair : block_pairs) {
+    const int row_block_size = blocks_[block_pair.first];
+    const int col_block_size = blocks_[block_pair.second];
+    cell_values_.push_back(make_pair(block_pair, values + pos));
+    layout_[IntPairToLong(block_pair.first, block_pair.second)] =
+        new CellInfo(values + pos);
+    pos += row_block_size * col_block_size;
+  }
+
+  // Fill the sparsity pattern of the underlying matrix.
+  for (const auto& block_pair : block_pairs) {
+    const int row_block_id = block_pair.first;
+    const int col_block_id = block_pair.second;
+    const int row_block_size = blocks_[row_block_id];
+    const int col_block_size = blocks_[col_block_id];
+    int pos =
+        layout_[IntPairToLong(row_block_id, col_block_id)]->values - values;
+    for (int r = 0; r < row_block_size; ++r) {
+      for (int c = 0; c < col_block_size; ++c, ++pos) {
+          rows[pos] = block_positions_[row_block_id] + r;
+          cols[pos] = block_positions_[col_block_id] + c;
+          values[pos] = 1.0;
+          DCHECK_LT(rows[pos], tsm_->num_rows());
+          DCHECK_LT(cols[pos], tsm_->num_rows());
+      }
+    }
+  }
+}
+
+// Assume that the user does not hold any locks on any cell blocks
+// when the matrix is being destroyed.
+BlockRandomAccessSparseMatrix::~BlockRandomAccessSparseMatrix() {
+  for (const auto& entry : layout_) {
+    delete entry.second;
+  }
+}
+
+CellInfo* BlockRandomAccessSparseMatrix::GetCell(int row_block_id,
+                                                 int col_block_id,
+                                                 int* row,
+                                                 int* col,
+                                                 int* row_stride,
+                                                 int* col_stride) {
+  const LayoutType::iterator it  =
+      layout_.find(IntPairToLong(row_block_id, col_block_id));
+  if (it == layout_.end()) {
+    return NULL;
+  }
+
+  // Each cell is stored contiguously as its own little dense matrix.
+  *row = 0;
+  *col = 0;
+  *row_stride = blocks_[row_block_id];
+  *col_stride = blocks_[col_block_id];
+  return it->second;
+}
+
+// Assume that the user does not hold any locks on any cell blocks
+// when they are calling SetZero.
+void BlockRandomAccessSparseMatrix::SetZero() {
+  if (tsm_->num_nonzeros()) {
+    VectorRef(tsm_->mutable_values(),
+              tsm_->num_nonzeros()).setZero();
+  }
+}
+
+void BlockRandomAccessSparseMatrix::SymmetricRightMultiply(const double* x,
+                                                           double* y) const {
+  for (const auto& cell_position_and_data : cell_values_) {
+    const int row = cell_position_and_data.first.first;
+    const int row_block_size = blocks_[row];
+    const int row_block_pos = block_positions_[row];
+
+    const int col = cell_position_and_data.first.second;
+    const int col_block_size = blocks_[col];
+    const int col_block_pos = block_positions_[col];
+
+    MatrixVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
+        cell_position_and_data.second, row_block_size, col_block_size,
+        x + col_block_pos,
+        y + row_block_pos);
+
+    // Since the matrix is symmetric, but only the upper triangular
+    // part is stored, if the block being accessed is not a diagonal
+    // block, then use the same block to do the corresponding lower
+    // triangular multiply also.
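+    //
+    // Concretely, for an off-diagonal cell B stored at block (row, col),
+    // the multiply above added B * x_col into y_row; the transpose
+    // multiply below adds B^T * x_row into y_col, covering the mirrored
+    // half of the matrix.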
+    if (row != col) {
+      MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
+          cell_position_and_data.second, row_block_size, col_block_size,
+          x + row_block_pos,
+          y + col_block_pos);
+    }
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/block_random_access_sparse_matrix.h b/internal/ceres/block_random_access_sparse_matrix.h
new file mode 100644
index 0000000..12244a5
--- /dev/null
+++ b/internal/ceres/block_random_access_sparse_matrix.h
@@ -0,0 +1,129 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_BLOCK_RANDOM_ACCESS_SPARSE_MATRIX_H_
+#define CERES_INTERNAL_BLOCK_RANDOM_ACCESS_SPARSE_MATRIX_H_
+
+#include <cstdint>
+#include <memory>
+#include <set>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
+#include "ceres/block_random_access_matrix.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/internal/port.h"
+#include "ceres/types.h"
+#include "ceres/small_blas.h"
+
+namespace ceres {
+namespace internal {
+
+// A thread safe square block sparse implementation of
+// BlockRandomAccessMatrix. Internally a TripletSparseMatrix is used
+// for doing the actual storage. This class augments this matrix with
+// an unordered_map that allows random read/write access.
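+//
+// A minimal construction sketch (hypothetical block layout):
+//
+//   std::vector<int> blocks;
+//   blocks.push_back(3);
+//   blocks.push_back(4);
+//   std::set<std::pair<int, int>> block_pairs;
+//   block_pairs.insert(std::make_pair(0, 0));
+//   block_pairs.insert(std::make_pair(0, 1));
+//   BlockRandomAccessSparseMatrix m(blocks, block_pairs);
+//
+// Cells are then read and written via GetCell(); see
+// block_random_access_matrix.h for the access pattern.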
+class BlockRandomAccessSparseMatrix : public BlockRandomAccessMatrix {
+ public:
+  // blocks is an array of block sizes. block_pairs is a set of
+  // <row_block_id, col_block_id> pairs to identify the non-zero cells
+  // of this matrix.
+  BlockRandomAccessSparseMatrix(
+      const std::vector<int>& blocks,
+      const std::set<std::pair<int, int>>& block_pairs);
+  BlockRandomAccessSparseMatrix(const BlockRandomAccessSparseMatrix&) = delete;
+  void operator=(const BlockRandomAccessSparseMatrix&) = delete;
+
+  // The destructor is not thread safe. It assumes that no one is
+  // modifying any cells when the matrix is being destroyed.
+  virtual ~BlockRandomAccessSparseMatrix();
+
+  // BlockRandomAccessMatrix Interface.
+  virtual CellInfo* GetCell(int row_block_id,
+                            int col_block_id,
+                            int* row,
+                            int* col,
+                            int* row_stride,
+                            int* col_stride);
+
+  // This method is not thread safe; it assumes that no cell is
+  // locked.
+  virtual void SetZero();
+
+  // Assume that the matrix is symmetric and only one half of the
+  // matrix is stored.
+  //
+  // y += S * x
+  void SymmetricRightMultiply(const double* x, double* y) const;
+
+  // Since the matrix is square, num_rows() == num_cols().
+  virtual int num_rows() const { return tsm_->num_rows(); }
+  virtual int num_cols() const { return tsm_->num_cols(); }
+
+  // Access to the underlying matrix object.
+  const TripletSparseMatrix* matrix() const { return tsm_.get(); }
+  TripletSparseMatrix* mutable_matrix() { return tsm_.get(); }
+
+ private:
+  int64_t IntPairToLong(int row, int col) const {
+    return row * kMaxRowBlocks + col;
+  }
+
+  void LongToIntPair(int64_t index, int* row, int* col) const {
+    *row = index / kMaxRowBlocks;
+    *col = index % kMaxRowBlocks;
+  }
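+
+  // For example, with kMaxRowBlocks = 10 * 1000 * 1000, IntPairToLong
+  // maps the pair (2, 3) to 2 * 10000000 + 3 = 20000003, and
+  // LongToIntPair recovers (2, 3) by integer division and modulo.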
+
+  const int64_t kMaxRowBlocks;
+
+  // row/column block sizes.
+  const std::vector<int> blocks_;
+  std::vector<int> block_positions_;
+
+  // A mapping from <row_block_id, col_block_id> (packed into a single
+  // int64_t key by IntPairToLong) to the CellInfo pointing into the
+  // values array of tsm_ where the block is stored.
+  typedef std::unordered_map<int64_t, CellInfo*> LayoutType;
+  LayoutType layout_;
+
+  // In order traversal of the contents of the matrix. This allows us
+  // to implement a matrix-vector product which is 20% faster than
+  // iterating over the Layout object.
+  std::vector<std::pair<std::pair<int, int>, double*>> cell_values_;
+  // The underlying matrix object which actually stores the cells.
+  std::unique_ptr<TripletSparseMatrix> tsm_;
+
+  friend class BlockRandomAccessSparseMatrixTest;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_BLOCK_RANDOM_ACCESS_SPARSE_MATRIX_H_
diff --git a/internal/ceres/block_random_access_sparse_matrix_test.cc b/internal/ceres/block_random_access_sparse_matrix_test.cc
new file mode 100644
index 0000000..9ca9c46
--- /dev/null
+++ b/internal/ceres/block_random_access_sparse_matrix_test.cc
@@ -0,0 +1,186 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <limits>
+#include <memory>
+#include <vector>
+
+#include "ceres/block_random_access_sparse_matrix.h"
+#include "ceres/internal/eigen.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+using std::make_pair;
+using std::pair;
+using std::set;
+using std::vector;
+
+TEST(BlockRandomAccessSparseMatrix, GetCell) {
+  vector<int> blocks;
+  blocks.push_back(3);
+  blocks.push_back(4);
+  blocks.push_back(5);
+  const int num_rows = 3 + 4 + 5;
+
+  set<pair<int, int>> block_pairs;
+  int num_nonzeros = 0;
+  block_pairs.insert(make_pair(0, 0));
+  num_nonzeros += blocks[0] * blocks[0];
+
+  block_pairs.insert(make_pair(1, 1));
+  num_nonzeros += blocks[1] * blocks[1];
+
+  block_pairs.insert(make_pair(1, 2));
+  num_nonzeros += blocks[1] * blocks[2];
+
+  block_pairs.insert(make_pair(0, 2));
+  num_nonzeros += blocks[2] * blocks[0];
+
+  BlockRandomAccessSparseMatrix m(blocks, block_pairs);
+  EXPECT_EQ(m.num_rows(), num_rows);
+  EXPECT_EQ(m.num_cols(), num_rows);
+
+  for (const auto& block_pair : block_pairs) {
+    const int row_block_id = block_pair.first;
+    const int col_block_id = block_pair.second;
+    int row;
+    int col;
+    int row_stride;
+    int col_stride;
+    CellInfo* cell =  m.GetCell(row_block_id, col_block_id,
+                                &row, &col,
+                                &row_stride, &col_stride);
+    EXPECT_TRUE(cell != NULL);
+    EXPECT_EQ(row, 0);
+    EXPECT_EQ(col, 0);
+    EXPECT_EQ(row_stride, blocks[row_block_id]);
+    EXPECT_EQ(col_stride, blocks[col_block_id]);
+
+    // Write into the block
+    MatrixRef(cell->values, row_stride, col_stride).block(
+        row, col, blocks[row_block_id], blocks[col_block_id]) =
+        (row_block_id + 1) * (col_block_id +1) *
+        Matrix::Ones(blocks[row_block_id], blocks[col_block_id]);
+  }
+
+  const TripletSparseMatrix* tsm = m.matrix();
+  EXPECT_EQ(tsm->num_nonzeros(), num_nonzeros);
+  EXPECT_EQ(tsm->max_num_nonzeros(), num_nonzeros);
+
+  Matrix dense;
+  tsm->ToDenseMatrix(&dense);
+
+  double kTolerance = 1e-14;
+
+  // (0, 0)
+  EXPECT_NEAR((dense.block(0, 0, 3, 3) - Matrix::Ones(3, 3)).norm(),
+              0.0,
+              kTolerance);
+  // (1, 1)
+  EXPECT_NEAR((dense.block(3, 3, 4, 4) - 2 * 2 * Matrix::Ones(4, 4)).norm(),
+              0.0,
+              kTolerance);
+  // (1, 2)
+  EXPECT_NEAR((dense.block(3, 3 + 4, 4, 5) - 2 * 3 * Matrix::Ones(4, 5)).norm(),
+              0.0,
+              kTolerance);
+  // (0, 2)
+  EXPECT_NEAR((dense.block(0, 3 + 4, 3, 5) - 3 * 1 * Matrix::Ones(3, 5)).norm(),
+              0.0,
+              kTolerance);
+
+  // There is nothing else in the matrix besides these four blocks.
+  EXPECT_NEAR(dense.norm(), sqrt(9. + 16. * 16. + 36. * 20. + 9. * 15.),
+              kTolerance);
+
+  Vector x = Vector::Ones(dense.rows());
+  Vector actual_y = Vector::Zero(dense.rows());
+  Vector expected_y = Vector::Zero(dense.rows());
+
+  expected_y += dense.selfadjointView<Eigen::Upper>() * x;
+  m.SymmetricRightMultiply(x.data(), actual_y.data());
+  EXPECT_NEAR((expected_y - actual_y).norm(), 0.0, kTolerance)
+      << "actual: " << actual_y.transpose() << "\n"
+      << "expected: " << expected_y.transpose()
+      << "matrix: \n " << dense;
+}
+
+// IntPairToLong is private, thus this fixture is needed to access and
+// test it.
+class BlockRandomAccessSparseMatrixTest : public ::testing::Test {
+ public:
+  virtual void SetUp() {
+    vector<int> blocks;
+    blocks.push_back(1);
+    set<pair<int, int>> block_pairs;
+    block_pairs.insert(make_pair(0, 0));
+    m_.reset(new BlockRandomAccessSparseMatrix(blocks, block_pairs));
+  }
+
+  void CheckIntPairToLong(int a, int b) {
+    int64_t value = m_->IntPairToLong(a, b);
+    EXPECT_GT(value, 0) << "Overflow a = " << a << " b = " << b;
+    EXPECT_GT(value, a) << "Overflow a = " << a << " b = " << b;
+    EXPECT_GT(value, b) << "Overflow a = " << a << " b = " << b;
+  }
+
+  void CheckLongToIntPair() {
+    uint64_t max_rows =  m_->kMaxRowBlocks;
+    for (int row = max_rows - 10; row < max_rows; ++row) {
+      for (int col = 0; col < 10; ++col) {
+        int row_computed;
+        int col_computed;
+        m_->LongToIntPair(m_->IntPairToLong(row, col),
+                          &row_computed,
+                          &col_computed);
+        EXPECT_EQ(row, row_computed);
+        EXPECT_EQ(col, col_computed);
+      }
+    }
+  }
+
+ private:
+  std::unique_ptr<BlockRandomAccessSparseMatrix> m_;
+};
+
+TEST_F(BlockRandomAccessSparseMatrixTest, IntPairToLongOverflow) {
+  CheckIntPairToLong(std::numeric_limits<int>::max(),
+                     std::numeric_limits<int>::max());
+}
+
+TEST_F(BlockRandomAccessSparseMatrixTest, LongToIntPair) {
+  CheckLongToIntPair();
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/block_sparse_matrix.cc b/internal/ceres/block_sparse_matrix.cc
new file mode 100644
index 0000000..8f50f35
--- /dev/null
+++ b/internal/ceres/block_sparse_matrix.cc
@@ -0,0 +1,404 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/block_sparse_matrix.h"
+
+#include <cstddef>
+#include <algorithm>
+#include <vector>
+#include "ceres/block_structure.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/random.h"
+#include "ceres/small_blas.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+using std::vector;
+
+BlockSparseMatrix::~BlockSparseMatrix() {}
+
+BlockSparseMatrix::BlockSparseMatrix(
+    CompressedRowBlockStructure* block_structure)
+    : num_rows_(0),
+      num_cols_(0),
+      num_nonzeros_(0),
+      block_structure_(block_structure) {
+  CHECK(block_structure_ != nullptr);
+
+  // Count the number of columns in the matrix.
+  for (int i = 0; i < block_structure_->cols.size(); ++i) {
+    num_cols_ += block_structure_->cols[i].size;
+  }
+
+  // Count the number of non-zero entries and the number of rows in
+  // the matrix.
+  for (int i = 0; i < block_structure_->rows.size(); ++i) {
+    int row_block_size = block_structure_->rows[i].block.size;
+    num_rows_ += row_block_size;
+
+    const vector<Cell>& cells = block_structure_->rows[i].cells;
+    for (int j = 0; j < cells.size(); ++j) {
+      int col_block_id = cells[j].block_id;
+      int col_block_size = block_structure_->cols[col_block_id].size;
+      num_nonzeros_ += col_block_size * row_block_size;
+    }
+  }
+
+  CHECK_GE(num_rows_, 0);
+  CHECK_GE(num_cols_, 0);
+  CHECK_GE(num_nonzeros_, 0);
+  VLOG(2) << "Allocating values array with "
+          << num_nonzeros_ * sizeof(double) << " bytes.";  // NOLINT
+  values_.reset(new double[num_nonzeros_]);
+  max_num_nonzeros_ = num_nonzeros_;
+  CHECK(values_ != nullptr);
+}
+
+void BlockSparseMatrix::SetZero() {
+  std::fill(values_.get(), values_.get() + num_nonzeros_, 0.0);
+}
+
+void BlockSparseMatrix::RightMultiply(const double* x,  double* y) const {
+  CHECK(x != nullptr);
+  CHECK(y != nullptr);
+
+  for (int i = 0; i < block_structure_->rows.size(); ++i) {
+    int row_block_pos = block_structure_->rows[i].block.position;
+    int row_block_size = block_structure_->rows[i].block.size;
+    const vector<Cell>& cells = block_structure_->rows[i].cells;
+    for (int j = 0; j < cells.size(); ++j) {
+      int col_block_id = cells[j].block_id;
+      int col_block_size = block_structure_->cols[col_block_id].size;
+      int col_block_pos = block_structure_->cols[col_block_id].position;
+      MatrixVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
+          values_.get() + cells[j].position, row_block_size, col_block_size,
+          x + col_block_pos,
+          y + row_block_pos);
+    }
+  }
+}
+
+void BlockSparseMatrix::LeftMultiply(const double* x, double* y) const {
+  CHECK(x != nullptr);
+  CHECK(y != nullptr);
+
+  for (int i = 0; i < block_structure_->rows.size(); ++i) {
+    int row_block_pos = block_structure_->rows[i].block.position;
+    int row_block_size = block_structure_->rows[i].block.size;
+    const vector<Cell>& cells = block_structure_->rows[i].cells;
+    for (int j = 0; j < cells.size(); ++j) {
+      int col_block_id = cells[j].block_id;
+      int col_block_size = block_structure_->cols[col_block_id].size;
+      int col_block_pos = block_structure_->cols[col_block_id].position;
+      MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
+          values_.get() + cells[j].position, row_block_size, col_block_size,
+          x + row_block_pos,
+          y + col_block_pos);
+    }
+  }
+}
+
+void BlockSparseMatrix::SquaredColumnNorm(double* x) const {
+  CHECK(x != nullptr);
+  VectorRef(x, num_cols_).setZero();
+  for (int i = 0; i < block_structure_->rows.size(); ++i) {
+    int row_block_size = block_structure_->rows[i].block.size;
+    const vector<Cell>& cells = block_structure_->rows[i].cells;
+    for (int j = 0; j < cells.size(); ++j) {
+      int col_block_id = cells[j].block_id;
+      int col_block_size = block_structure_->cols[col_block_id].size;
+      int col_block_pos = block_structure_->cols[col_block_id].position;
+      const MatrixRef m(values_.get() + cells[j].position,
+                        row_block_size, col_block_size);
+      VectorRef(x + col_block_pos, col_block_size) += m.colwise().squaredNorm();
+    }
+  }
+}
+
+void BlockSparseMatrix::ScaleColumns(const double* scale) {
+  CHECK(scale != nullptr);
+
+  for (int i = 0; i < block_structure_->rows.size(); ++i) {
+    int row_block_size = block_structure_->rows[i].block.size;
+    const vector<Cell>& cells = block_structure_->rows[i].cells;
+    for (int j = 0; j < cells.size(); ++j) {
+      int col_block_id = cells[j].block_id;
+      int col_block_size = block_structure_->cols[col_block_id].size;
+      int col_block_pos = block_structure_->cols[col_block_id].position;
+      MatrixRef m(values_.get() + cells[j].position,
+                        row_block_size, col_block_size);
+      m *= ConstVectorRef(scale + col_block_pos, col_block_size).asDiagonal();
+    }
+  }
+}
+
+void BlockSparseMatrix::ToDenseMatrix(Matrix* dense_matrix) const {
+  CHECK(dense_matrix != nullptr);
+
+  dense_matrix->resize(num_rows_, num_cols_);
+  dense_matrix->setZero();
+  Matrix& m = *dense_matrix;
+
+  for (int i = 0; i < block_structure_->rows.size(); ++i) {
+    int row_block_pos = block_structure_->rows[i].block.position;
+    int row_block_size = block_structure_->rows[i].block.size;
+    const vector<Cell>& cells = block_structure_->rows[i].cells;
+    for (int j = 0; j < cells.size(); ++j) {
+      int col_block_id = cells[j].block_id;
+      int col_block_size = block_structure_->cols[col_block_id].size;
+      int col_block_pos = block_structure_->cols[col_block_id].position;
+      int jac_pos = cells[j].position;
+      m.block(row_block_pos, col_block_pos, row_block_size, col_block_size)
+          += MatrixRef(values_.get() + jac_pos, row_block_size, col_block_size);
+    }
+  }
+}
+
+void BlockSparseMatrix::ToTripletSparseMatrix(
+    TripletSparseMatrix* matrix) const {
+  CHECK(matrix != nullptr);
+
+  matrix->Reserve(num_nonzeros_);
+  matrix->Resize(num_rows_, num_cols_);
+  matrix->SetZero();
+
+  for (int i = 0; i < block_structure_->rows.size(); ++i) {
+    int row_block_pos = block_structure_->rows[i].block.position;
+    int row_block_size = block_structure_->rows[i].block.size;
+    const vector<Cell>& cells = block_structure_->rows[i].cells;
+    for (int j = 0; j < cells.size(); ++j) {
+      int col_block_id = cells[j].block_id;
+      int col_block_size = block_structure_->cols[col_block_id].size;
+      int col_block_pos = block_structure_->cols[col_block_id].position;
+      int jac_pos = cells[j].position;
+       for (int r = 0; r < row_block_size; ++r) {
+        for (int c = 0; c < col_block_size; ++c, ++jac_pos) {
+          matrix->mutable_rows()[jac_pos] = row_block_pos + r;
+          matrix->mutable_cols()[jac_pos] = col_block_pos + c;
+          matrix->mutable_values()[jac_pos] = values_[jac_pos];
+        }
+      }
+    }
+  }
+  matrix->set_num_nonzeros(num_nonzeros_);
+}
+
+// Return a pointer to the block structure. We continue to hold
+// ownership of the object though.
+const CompressedRowBlockStructure* BlockSparseMatrix::block_structure()
+    const {
+  return block_structure_.get();
+}
+
+void BlockSparseMatrix::ToTextFile(FILE* file) const {
+  CHECK(file != nullptr);
+  for (int i = 0; i < block_structure_->rows.size(); ++i) {
+    const int row_block_pos = block_structure_->rows[i].block.position;
+    const int row_block_size = block_structure_->rows[i].block.size;
+    const vector<Cell>& cells = block_structure_->rows[i].cells;
+    for (int j = 0; j < cells.size(); ++j) {
+      const int col_block_id = cells[j].block_id;
+      const int col_block_size = block_structure_->cols[col_block_id].size;
+      const int col_block_pos = block_structure_->cols[col_block_id].position;
+      int jac_pos = cells[j].position;
+      for (int r = 0; r < row_block_size; ++r) {
+        for (int c = 0; c < col_block_size; ++c) {
+          fprintf(file, "% 10d % 10d %17f\n",
+                  row_block_pos + r,
+                  col_block_pos + c,
+                  values_[jac_pos++]);
+        }
+      }
+    }
+  }
+}
+
+BlockSparseMatrix* BlockSparseMatrix::CreateDiagonalMatrix(
+    const double* diagonal, const std::vector<Block>& column_blocks) {
+  // Create the block structure for the diagonal matrix.
+  CompressedRowBlockStructure* bs = new CompressedRowBlockStructure();
+  bs->cols = column_blocks;
+  int position = 0;
+  bs->rows.resize(column_blocks.size(), CompressedRow(1));
+  for (int i = 0; i < column_blocks.size(); ++i) {
+    CompressedRow& row = bs->rows[i];
+    row.block = column_blocks[i];
+    Cell& cell = row.cells[0];
+    cell.block_id = i;
+    cell.position = position;
+    position += row.block.size * row.block.size;
+  }
+
+  // Create the BlockSparseMatrix with the given block structure.
+  BlockSparseMatrix* matrix = new BlockSparseMatrix(bs);
+  matrix->SetZero();
+
+  // Fill the values array of the block sparse matrix.
+  double* values = matrix->mutable_values();
+  for (int i = 0; i < column_blocks.size(); ++i) {
+    const int size = column_blocks[i].size;
+    for (int j = 0; j < size; ++j) {
+      // j * (size + 1) is a compact way of accessing the (j, j) entry.
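+      // For size = 3, for example, the (j, j) entries land at offsets
+      // 0, 4 and 8 of this row-major block.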
+      values[j * (size + 1)] = diagonal[j];
+    }
+    diagonal += size;
+    values += size * size;
+  }
+
+  return matrix;
+}
+
+void BlockSparseMatrix::AppendRows(const BlockSparseMatrix& m) {
+  CHECK_EQ(m.num_cols(), num_cols());
+  const CompressedRowBlockStructure* m_bs = m.block_structure();
+  CHECK_EQ(m_bs->cols.size(), block_structure_->cols.size());
+
+  const int old_num_nonzeros = num_nonzeros_;
+  const int old_num_row_blocks = block_structure_->rows.size();
+  block_structure_->rows.resize(old_num_row_blocks + m_bs->rows.size());
+
+  for (int i = 0; i < m_bs->rows.size(); ++i) {
+    const CompressedRow& m_row = m_bs->rows[i];
+    CompressedRow& row = block_structure_->rows[old_num_row_blocks + i];
+    row.block.size = m_row.block.size;
+    row.block.position = num_rows_;
+    num_rows_ += m_row.block.size;
+    row.cells.resize(m_row.cells.size());
+    for (int c = 0; c < m_row.cells.size(); ++c) {
+      const int block_id = m_row.cells[c].block_id;
+      row.cells[c].block_id = block_id;
+      row.cells[c].position = num_nonzeros_;
+      num_nonzeros_ += m_row.block.size * m_bs->cols[block_id].size;
+    }
+  }
+
+  if (num_nonzeros_ > max_num_nonzeros_) {
+    double* new_values = new double[num_nonzeros_];
+    std::copy(values_.get(), values_.get() + old_num_nonzeros, new_values);
+    values_.reset(new_values);
+    max_num_nonzeros_ = num_nonzeros_;
+  }
+
+  std::copy(m.values(),
+            m.values() + m.num_nonzeros(),
+            values_.get() + old_num_nonzeros);
+}
+
+void BlockSparseMatrix::DeleteRowBlocks(const int delta_row_blocks) {
+  const int num_row_blocks = block_structure_->rows.size();
+  int delta_num_nonzeros = 0;
+  int delta_num_rows = 0;
+  const std::vector<Block>& column_blocks = block_structure_->cols;
+  for (int i = 0; i < delta_row_blocks; ++i) {
+    const CompressedRow& row = block_structure_->rows[num_row_blocks - i - 1];
+    delta_num_rows += row.block.size;
+    for (int c = 0; c < row.cells.size(); ++c) {
+      const Cell& cell = row.cells[c];
+      delta_num_nonzeros += row.block.size * column_blocks[cell.block_id].size;
+    }
+  }
+  num_nonzeros_ -= delta_num_nonzeros;
+  num_rows_ -= delta_num_rows;
+  block_structure_->rows.resize(num_row_blocks - delta_row_blocks);
+}
+
+BlockSparseMatrix* BlockSparseMatrix::CreateRandomMatrix(
+    const BlockSparseMatrix::RandomMatrixOptions& options) {
+  CHECK_GT(options.num_row_blocks, 0);
+  CHECK_GT(options.min_row_block_size, 0);
+  CHECK_GT(options.max_row_block_size, 0);
+  CHECK_LE(options.min_row_block_size, options.max_row_block_size);
+  CHECK_GT(options.block_density, 0.0);
+  CHECK_LE(options.block_density, 1.0);
+
+  CompressedRowBlockStructure* bs = new CompressedRowBlockStructure();
+  if (options.col_blocks.empty()) {
+    CHECK_GT(options.num_col_blocks, 0);
+    CHECK_GT(options.min_col_block_size, 0);
+    CHECK_GT(options.max_col_block_size, 0);
+    CHECK_LE(options.min_col_block_size, options.max_col_block_size);
+
+    // Generate the col block structure.
+    int col_block_position = 0;
+    for (int i = 0; i < options.num_col_blocks; ++i) {
+      // Generate a random integer in [min_col_block_size, max_col_block_size]
+      const int delta_block_size =
+          Uniform(options.max_col_block_size - options.min_col_block_size);
+      const int col_block_size = options.min_col_block_size + delta_block_size;
+      bs->cols.push_back(Block(col_block_size, col_block_position));
+      col_block_position += col_block_size;
+    }
+  } else {
+    bs->cols = options.col_blocks;
+  }
+
+  bool matrix_has_blocks = false;
+  while (!matrix_has_blocks) {
+    VLOG(1) << "Clearing";
+    bs->rows.clear();
+    int row_block_position = 0;
+    int value_position = 0;
+    for (int r = 0; r < options.num_row_blocks; ++r) {
+
+      const int delta_block_size =
+          Uniform(options.max_row_block_size - options.min_row_block_size);
+      const int row_block_size = options.min_row_block_size + delta_block_size;
+      bs->rows.push_back(CompressedRow());
+      CompressedRow& row = bs->rows.back();
+      row.block.size = row_block_size;
+      row.block.position = row_block_position;
+      row_block_position += row_block_size;
+      for (int c = 0; c < bs->cols.size(); ++c) {
+        if (RandDouble() > options.block_density) continue;
+
+        row.cells.push_back(Cell());
+        Cell& cell = row.cells.back();
+        cell.block_id = c;
+        cell.position = value_position;
+        value_position += row_block_size * bs->cols[c].size;
+        matrix_has_blocks = true;
+      }
+    }
+  }
+
+  BlockSparseMatrix* matrix = new BlockSparseMatrix(bs);
+  double* values = matrix->mutable_values();
+  for (int i = 0; i < matrix->num_nonzeros(); ++i) {
+    values[i] = RandNormal();
+  }
+
+  return matrix;
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/block_sparse_matrix.h b/internal/ceres/block_sparse_matrix.h
new file mode 100644
index 0000000..366ef87
--- /dev/null
+++ b/internal/ceres/block_sparse_matrix.h
@@ -0,0 +1,139 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Implementation of the SparseMatrix interface for block sparse
+// matrices.
+
+#ifndef CERES_INTERNAL_BLOCK_SPARSE_MATRIX_H_
+#define CERES_INTERNAL_BLOCK_SPARSE_MATRIX_H_
+
+#include <memory>
+#include "ceres/block_structure.h"
+#include "ceres/sparse_matrix.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+class TripletSparseMatrix;
+
+// This class implements the SparseMatrix interface for storing and
+// manipulating block sparse matrices. The block structure is stored
+// in the CompressedRowBlockStructure object and one is needed to
+// initialize the matrix. For details on how the block structure of
+// the matrix is stored, please see the documentation in
+//
+//   internal/ceres/block_structure.h
+//
+class BlockSparseMatrix : public SparseMatrix {
+ public:
+  // Construct a block sparse matrix with a fully initialized
+  // CompressedRowBlockStructure object. The matrix takes over
+  // ownership of this object and destroys it upon destruction.
+  //
+  // TODO(sameeragarwal): Add a function which will validate legal
+  // CompressedRowBlockStructure objects.
+  explicit BlockSparseMatrix(CompressedRowBlockStructure* block_structure);
+
+  BlockSparseMatrix();
+  BlockSparseMatrix(const BlockSparseMatrix&) = delete;
+  void operator=(const BlockSparseMatrix&) = delete;
+
+  virtual ~BlockSparseMatrix();
+
+  // Implementation of SparseMatrix interface.
+  virtual void SetZero();
+  virtual void RightMultiply(const double* x, double* y) const;
+  virtual void LeftMultiply(const double* x, double* y) const;
+  virtual void SquaredColumnNorm(double* x) const;
+  virtual void ScaleColumns(const double* scale);
+  virtual void ToDenseMatrix(Matrix* dense_matrix) const;
+  virtual void ToTextFile(FILE* file) const;
+
+  virtual int num_rows()         const { return num_rows_;     }
+  virtual int num_cols()         const { return num_cols_;     }
+  virtual int num_nonzeros()     const { return num_nonzeros_; }
+  virtual const double* values() const { return values_.get(); }
+  virtual double* mutable_values()     { return values_.get(); }
+
+  void ToTripletSparseMatrix(TripletSparseMatrix* matrix) const;
+  const CompressedRowBlockStructure* block_structure() const;
+
+  // Append the contents of m to the bottom of this matrix. m must
+  // have the same column blocks structure as this matrix.
+  void AppendRows(const BlockSparseMatrix& m);
+
+  // Delete the bottom delta_row_blocks row blocks.
+  void DeleteRowBlocks(int delta_row_blocks);
+
+  static BlockSparseMatrix* CreateDiagonalMatrix(
+      const double* diagonal,
+      const std::vector<Block>& column_blocks);
+
+  struct RandomMatrixOptions {
+    int num_row_blocks = 0;
+    int min_row_block_size = 0;
+    int max_row_block_size = 0;
+    int num_col_blocks = 0;
+    int min_col_block_size = 0;
+    int max_col_block_size = 0;
+
+    // 0 < block_density <= 1 is the probability of a block being
+    // present in the matrix. A given random matrix will not have
+    // precisely this density.
+    double block_density = 0.0;
+
+    // If col_blocks is non-empty, then the generated random matrix
+    // has this block structure and the column related options in this
+    // struct are ignored.
+    std::vector<Block> col_blocks;
+  };
+
+  // Create a random BlockSparseMatrix whose entries are normally
+  // distributed and whose structure is determined by
+  // RandomMatrixOptions.
+  //
+  // Caller owns the result.
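+  //
+  // A usage sketch (illustrative; the option values below are arbitrary):
+  //
+  //   BlockSparseMatrix::RandomMatrixOptions options;
+  //   options.num_row_blocks = 5;
+  //   options.min_row_block_size = 1;
+  //   options.max_row_block_size = 4;
+  //   options.num_col_blocks = 3;
+  //   options.min_col_block_size = 1;
+  //   options.max_col_block_size = 4;
+  //   options.block_density = 0.25;
+  //   std::unique_ptr<BlockSparseMatrix> random(
+  //       BlockSparseMatrix::CreateRandomMatrix(options));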
+  static BlockSparseMatrix* CreateRandomMatrix(
+      const RandomMatrixOptions& options);
+
+ private:
+  int num_rows_;
+  int num_cols_;
+  int num_nonzeros_;
+  int max_num_nonzeros_;
+  std::unique_ptr<double[]> values_;
+  std::unique_ptr<CompressedRowBlockStructure> block_structure_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_BLOCK_SPARSE_MATRIX_H_
diff --git a/internal/ceres/block_sparse_matrix_test.cc b/internal/ceres/block_sparse_matrix_test.cc
new file mode 100644
index 0000000..26fa9a2
--- /dev/null
+++ b/internal/ceres/block_sparse_matrix_test.cc
@@ -0,0 +1,218 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/block_sparse_matrix.h"
+
+#include <memory>
+#include <string>
+#include "ceres/casts.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/linear_least_squares_problems.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+class BlockSparseMatrixTest : public ::testing::Test {
+ protected:
+  virtual void SetUp() {
+    std::unique_ptr<LinearLeastSquaresProblem> problem(
+        CreateLinearLeastSquaresProblemFromId(2));
+    CHECK(problem != nullptr);
+    A_.reset(down_cast<BlockSparseMatrix*>(problem->A.release()));
+
+    problem.reset(CreateLinearLeastSquaresProblemFromId(1));
+    CHECK(problem != nullptr);
+    B_.reset(down_cast<TripletSparseMatrix*>(problem->A.release()));
+
+    CHECK_EQ(A_->num_rows(), B_->num_rows());
+    CHECK_EQ(A_->num_cols(), B_->num_cols());
+    CHECK_EQ(A_->num_nonzeros(), B_->num_nonzeros());
+  }
+
+  std::unique_ptr<BlockSparseMatrix> A_;
+  std::unique_ptr<TripletSparseMatrix> B_;
+};
+
+TEST_F(BlockSparseMatrixTest, SetZeroTest) {
+  A_->SetZero();
+  EXPECT_EQ(13, A_->num_nonzeros());
+}
+
+TEST_F(BlockSparseMatrixTest, RightMultiplyTest) {
+  Vector y_a = Vector::Zero(A_->num_rows());
+  Vector y_b = Vector::Zero(A_->num_rows());
+  for (int i = 0; i < A_->num_cols(); ++i) {
+    Vector x = Vector::Zero(A_->num_cols());
+    x[i] = 1.0;
+    A_->RightMultiply(x.data(), y_a.data());
+    B_->RightMultiply(x.data(), y_b.data());
+    EXPECT_LT((y_a - y_b).norm(), 1e-12);
+  }
+}
+
+TEST_F(BlockSparseMatrixTest, LeftMultiplyTest) {
+  Vector y_a = Vector::Zero(A_->num_cols());
+  Vector y_b = Vector::Zero(A_->num_cols());
+  for (int i = 0; i < A_->num_rows(); ++i) {
+    Vector x = Vector::Zero(A_->num_rows());
+    x[i] = 1.0;
+    A_->LeftMultiply(x.data(), y_a.data());
+    B_->LeftMultiply(x.data(), y_b.data());
+    EXPECT_LT((y_a - y_b).norm(), 1e-12);
+  }
+}
+
+TEST_F(BlockSparseMatrixTest, SquaredColumnNormTest) {
+  Vector y_a = Vector::Zero(A_->num_cols());
+  Vector y_b = Vector::Zero(A_->num_cols());
+  A_->SquaredColumnNorm(y_a.data());
+  B_->SquaredColumnNorm(y_b.data());
+  EXPECT_LT((y_a - y_b).norm(), 1e-12);
+}
+
+TEST_F(BlockSparseMatrixTest, ToDenseMatrixTest) {
+  Matrix m_a;
+  Matrix m_b;
+  A_->ToDenseMatrix(&m_a);
+  B_->ToDenseMatrix(&m_b);
+  EXPECT_LT((m_a - m_b).norm(), 1e-12);
+}
+
+TEST_F(BlockSparseMatrixTest, AppendRows) {
+  std::unique_ptr<LinearLeastSquaresProblem> problem(
+      CreateLinearLeastSquaresProblemFromId(2));
+  std::unique_ptr<BlockSparseMatrix> m(
+      down_cast<BlockSparseMatrix*>(problem->A.release()));
+  A_->AppendRows(*m);
+  EXPECT_EQ(A_->num_rows(), 2 * m->num_rows());
+  EXPECT_EQ(A_->num_cols(), m->num_cols());
+
+  problem.reset(CreateLinearLeastSquaresProblemFromId(1));
+  std::unique_ptr<TripletSparseMatrix> m2(
+      down_cast<TripletSparseMatrix*>(problem->A.release()));
+  B_->AppendRows(*m2);
+
+  Vector y_a = Vector::Zero(A_->num_rows());
+  Vector y_b = Vector::Zero(A_->num_rows());
+  for (int i = 0; i < A_->num_cols(); ++i) {
+    Vector x = Vector::Zero(A_->num_cols());
+    x[i] = 1.0;
+    y_a.setZero();
+    y_b.setZero();
+
+    A_->RightMultiply(x.data(), y_a.data());
+    B_->RightMultiply(x.data(), y_b.data());
+    EXPECT_LT((y_a - y_b).norm(), 1e-12);
+  }
+}
+
+TEST_F(BlockSparseMatrixTest, AppendAndDeleteBlockDiagonalMatrix) {
+  const std::vector<Block>& column_blocks = A_->block_structure()->cols;
+  const int num_cols =
+      column_blocks.back().size + column_blocks.back().position;
+  Vector diagonal(num_cols);
+  for (int i = 0; i < num_cols; ++i) {
+    diagonal(i) = 2 * i * i + 1;
+  }
+  std::unique_ptr<BlockSparseMatrix> appendage(
+      BlockSparseMatrix::CreateDiagonalMatrix(diagonal.data(), column_blocks));
+
+  A_->AppendRows(*appendage);
+  Vector y_a, y_b;
+  y_a.resize(A_->num_rows());
+  y_b.resize(A_->num_rows());
+  for (int i = 0; i < A_->num_cols(); ++i) {
+    Vector x = Vector::Zero(A_->num_cols());
+    x[i] = 1.0;
+    y_a.setZero();
+    y_b.setZero();
+
+    A_->RightMultiply(x.data(), y_a.data());
+    B_->RightMultiply(x.data(), y_b.data());
+    EXPECT_LT((y_a.head(B_->num_rows()) - y_b.head(B_->num_rows())).norm(), 1e-12);
+    Vector expected_tail = Vector::Zero(A_->num_cols());
+    expected_tail(i) = diagonal(i);
+    EXPECT_LT((y_a.tail(A_->num_cols()) - expected_tail).norm(), 1e-12);
+  }
+
+  A_->DeleteRowBlocks(column_blocks.size());
+  EXPECT_EQ(A_->num_rows(), B_->num_rows());
+  EXPECT_EQ(A_->num_cols(), B_->num_cols());
+
+  y_a.resize(A_->num_rows());
+  y_b.resize(A_->num_rows());
+  for (int i = 0; i < A_->num_cols(); ++i) {
+    Vector x = Vector::Zero(A_->num_cols());
+    x[i] = 1.0;
+    y_a.setZero();
+    y_b.setZero();
+
+    A_->RightMultiply(x.data(), y_a.data());
+    B_->RightMultiply(x.data(), y_b.data());
+    EXPECT_LT((y_a - y_b).norm(), 1e-12);
+  }
+}
+
+TEST(BlockSparseMatrix, CreateDiagonalMatrix) {
+  std::vector<Block> column_blocks;
+  column_blocks.push_back(Block(2, 0));
+  column_blocks.push_back(Block(1, 2));
+  column_blocks.push_back(Block(3, 3));
+  const int num_cols =
+      column_blocks.back().size + column_blocks.back().position;
+  Vector diagonal(num_cols);
+  for (int i = 0; i < num_cols; ++i) {
+    diagonal(i) = 2 * i * i + 1;
+  }
+
+  std::unique_ptr<BlockSparseMatrix> m(
+      BlockSparseMatrix::CreateDiagonalMatrix(diagonal.data(), column_blocks));
+  const CompressedRowBlockStructure* bs = m->block_structure();
+  EXPECT_EQ(bs->cols.size(), column_blocks.size());
+  for (int i = 0; i < column_blocks.size(); ++i) {
+    EXPECT_EQ(bs->cols[i].size, column_blocks[i].size);
+    EXPECT_EQ(bs->cols[i].position, column_blocks[i].position);
+  }
+  EXPECT_EQ(m->num_rows(), m->num_cols());
+  Vector x = Vector::Ones(num_cols);
+  Vector y = Vector::Zero(num_cols);
+  m->RightMultiply(x.data(), y.data());
+  for (int i = 0; i < num_cols; ++i) {
+    EXPECT_NEAR(y[i], diagonal[i], std::numeric_limits<double>::epsilon());
+  }
+}
+
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/block_structure.cc b/internal/ceres/block_structure.cc
new file mode 100644
index 0000000..6479b60
--- /dev/null
+++ b/internal/ceres/block_structure.cc
@@ -0,0 +1,44 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/block_structure.h"
+
+namespace ceres {
+namespace internal {
+
+bool CellLessThan(const Cell& lhs, const Cell& rhs) {
+  if (lhs.block_id == rhs.block_id) {
+    return (lhs.position  < rhs.position);
+  }
+  return (lhs.block_id < rhs.block_id);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/block_structure.h b/internal/ceres/block_structure.h
new file mode 100644
index 0000000..b5218c0
--- /dev/null
+++ b/internal/ceres/block_structure.h
@@ -0,0 +1,98 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Block structure objects are used to carry information about the
+// dense block structure of sparse matrices. The BlockSparseMatrix
+// object uses the BlockStructure objects to keep track of the matrix
+// structure and operate upon it. This allows us to use more
+// cache-friendly, block-oriented linear algebra operations on the
+// matrix instead of accessing it one scalar entry at a time.
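+//
+// For example (an illustrative sketch), a matrix with two column blocks
+// of sizes 3 and 2, and a single row block of size 2 whose only cell
+// lies in the second column block, could be described as:
+//
+//   CompressedRowBlockStructure bs;
+//   bs.cols.push_back(Block(/*size=*/3, /*position=*/0));
+//   bs.cols.push_back(Block(/*size=*/2, /*position=*/3));
+//   CompressedRow row;
+//   row.block = Block(/*size=*/2, /*position=*/0);
+//   row.cells.push_back(Cell(/*block_id=*/1, /*position=*/0));
+//   bs.rows.push_back(row);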
+
+#ifndef CERES_INTERNAL_BLOCK_STRUCTURE_H_
+#define CERES_INTERNAL_BLOCK_STRUCTURE_H_
+
+#include <cstdint>
+#include <vector>
+#include "ceres/internal/port.h"
+
+namespace ceres {
+namespace internal {
+
+typedef int32_t BlockSize;
+
+struct Block {
+  Block() : size(-1), position(-1) {}
+  Block(int size_, int position_) : size(size_), position(position_) {}
+
+  BlockSize size;
+  int position;  // Position along the row/column.
+};
+
+struct Cell {
+  Cell() : block_id(-1), position(-1) {}
+  Cell(int block_id_, int position_)
+      : block_id(block_id_), position(position_) {}
+
+  // Column or row block id as the case may be.
+  int block_id;
+  // Where in the values array of the Jacobian this cell is located.
+  int position;
+};
+
+// Order cells by their block_id.
+bool CellLessThan(const Cell& lhs, const Cell& rhs);
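+//
+// For example (illustrative), the cells of a CompressedRow named row can
+// be ordered with:
+//
+//   std::sort(row.cells.begin(), row.cells.end(), CellLessThan);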
+
+struct CompressedList {
+  CompressedList() {}
+
+  // Construct a CompressedList whose cells vector contains num_cells
+  // entries.
+  CompressedList(int num_cells) : cells(num_cells) {}
+  Block block;
+  std::vector<Cell> cells;
+};
+
+typedef CompressedList CompressedRow;
+typedef CompressedList CompressedColumn;
+
+struct CompressedRowBlockStructure {
+  std::vector<Block> cols;
+  std::vector<CompressedRow> rows;
+};
+
+struct CompressedColumnBlockStructure {
+  std::vector<Block> rows;
+  std::vector<CompressedColumn> cols;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_BLOCK_STRUCTURE_H_
diff --git a/internal/ceres/bundle_adjustment_test_util.h b/internal/ceres/bundle_adjustment_test_util.h
new file mode 100644
index 0000000..7e076eb
--- /dev/null
+++ b/internal/ceres/bundle_adjustment_test_util.h
@@ -0,0 +1,247 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// End-to-end bundle adjustment test utilities for Ceres. These utilities are
+// used by the generated bundle adjustment test binaries. The bundle tests are
+// split into separate binaries so that they can be run in parallel.
+
+#include <cmath>
+#include <cstdio>
+#include <cstdlib>
+#include <string>
+
+#include "ceres/internal/port.h"
+
+#include "ceres/autodiff_cost_function.h"
+#include "ceres/ordered_groups.h"
+#include "ceres/problem.h"
+#include "ceres/rotation.h"
+#include "ceres/solver.h"
+#include "ceres/stringprintf.h"
+#include "ceres/test_util.h"
+#include "ceres/types.h"
+#include "gflags/gflags.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+using std::string;
+using std::vector;
+
+const bool kAutomaticOrdering = true;
+const bool kUserOrdering = false;
+
+// This class implements the SystemTestProblem interface and provides
+// access to a bundle adjustment problem. It is based on
+// examples/bundle_adjustment_example.cc. Currently a small 16-camera
+// problem is hard-coded in the constructor.
+class BundleAdjustmentProblem {
+ public:
+  BundleAdjustmentProblem() {
+    const string input_file = TestFileAbsolutePath("problem-16-22106-pre.txt");
+    ReadData(input_file);
+    BuildProblem();
+  }
+
+  ~BundleAdjustmentProblem() {
+    delete []point_index_;
+    delete []camera_index_;
+    delete []observations_;
+    delete []parameters_;
+  }
+
+  Problem* mutable_problem() { return &problem_; }
+  Solver::Options* mutable_solver_options() { return &options_; }
+
+  int num_cameras()            const { return num_cameras_;        }
+  int num_points()             const { return num_points_;         }
+  int num_observations()       const { return num_observations_;   }
+  const int* point_index()     const { return point_index_;  }
+  const int* camera_index()    const { return camera_index_; }
+  const double* observations() const { return observations_; }
+  double* mutable_cameras() { return parameters_; }
+  double* mutable_points() { return parameters_  + 9 * num_cameras_; }
+
+  static double kResidualTolerance;
+
+ private:
+  void ReadData(const string& filename) {
+    FILE * fptr = fopen(filename.c_str(), "r");
+
+    if (!fptr) {
+      LOG(FATAL) << "File Error: unable to open file " << filename;
+    }
+
+    // This will die horribly on invalid files. Them's the breaks.
+    FscanfOrDie(fptr, "%d", &num_cameras_);
+    FscanfOrDie(fptr, "%d", &num_points_);
+    FscanfOrDie(fptr, "%d", &num_observations_);
+
+    VLOG(1) << "Header: " << num_cameras_
+            << " " << num_points_
+            << " " << num_observations_;
+
+    point_index_ = new int[num_observations_];
+    camera_index_ = new int[num_observations_];
+    observations_ = new double[2 * num_observations_];
+
+    num_parameters_ = 9 * num_cameras_ + 3 * num_points_;
+    parameters_ = new double[num_parameters_];
+
+    for (int i = 0; i < num_observations_; ++i) {
+      FscanfOrDie(fptr, "%d", camera_index_ + i);
+      FscanfOrDie(fptr, "%d", point_index_ + i);
+      for (int j = 0; j < 2; ++j) {
+        FscanfOrDie(fptr, "%lf", observations_ + 2*i + j);
+      }
+    }
+
+    for (int i = 0; i < num_parameters_; ++i) {
+      FscanfOrDie(fptr, "%lf", parameters_ + i);
+    }
+
+    fclose(fptr);
+  }
+
+  void BuildProblem() {
+    double* points = mutable_points();
+    double* cameras = mutable_cameras();
+
+    for (int i = 0; i < num_observations(); ++i) {
+      // Each residual block takes a point and a camera as input and
+      // outputs a 2 dimensional residual.
+      CostFunction* cost_function =
+          new AutoDiffCostFunction<BundlerResidual, 2, 9, 3>(
+              new BundlerResidual(observations_[2*i + 0],
+                                  observations_[2*i + 1]));
+
+      // Each observation corresponds to a pair of a camera and a point
+      // which are identified by camera_index()[i] and
+      // point_index()[i] respectively.
+      double* camera = cameras + 9 * camera_index_[i];
+      double* point = points + 3 * point_index()[i];
+      problem_.AddResidualBlock(cost_function, NULL, camera, point);
+    }
+
+    options_.linear_solver_ordering.reset(new ParameterBlockOrdering);
+
+    // The points come before the cameras.
+    for (int i = 0; i < num_points_; ++i) {
+      options_.linear_solver_ordering->AddElementToGroup(points + 3 * i, 0);
+    }
+
+    for (int i = 0; i < num_cameras_; ++i) {
+      options_.linear_solver_ordering->AddElementToGroup(cameras + 9 * i, 1);
+    }
+
+    options_.linear_solver_type = DENSE_SCHUR;
+    options_.max_num_iterations = 25;
+    options_.function_tolerance = 1e-10;
+    options_.gradient_tolerance = 1e-10;
+    options_.parameter_tolerance = 1e-10;
+  }
+
+  template<typename T>
+  void FscanfOrDie(FILE *fptr, const char *format, T *value) {
+    int num_scanned = fscanf(fptr, format, value);
+    if (num_scanned != 1) {
+      LOG(FATAL) << "Invalid UW data file.";
+    }
+  }
+
+  // Templated pinhole camera model. The camera is parameterized
+  // using 9 parameters: 3 for rotation, 3 for translation, 1 for
+  // focal length and 2 for radial distortion. The principal point is
+  // not modeled (i.e. it is assumed to be located at the image
+  // center).
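+  //
+  // As equations (a summary of the code below, with X the 3D point,
+  // (R, t) the camera rotation and translation, f the focal length and
+  // (l1, l2) the radial distortion coefficients):
+  //
+  //   P        = R * X + t
+  //   (x, y)   = -f * (P_x / P_z, P_y / P_z)
+  //   r^2      = x^2 + y^2
+  //   residual = (1 + l1 * r^2 + l2 * r^4) * (x, y) - (u, v)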
+  struct BundlerResidual {
+    // (u, v): the position of the observation with respect to the image
+    // center point.
+    BundlerResidual(double u, double v): u(u), v(v) {}
+
+    template <typename T>
+    bool operator()(const T* const camera,
+                    const T* const point,
+                    T* residuals) const {
+      T p[3];
+      AngleAxisRotatePoint(camera, point, p);
+
+      // Add the translation vector
+      p[0] += camera[3];
+      p[1] += camera[4];
+      p[2] += camera[5];
+
+      const T& focal = camera[6];
+      const T& l1 = camera[7];
+      const T& l2 = camera[8];
+
+      // Compute the center of distortion.  The sign change comes from
+      // the camera model that Noah Snavely's Bundler assumes, whereby
+      // the camera coordinate system has a negative z axis.
+      T xp = - focal * p[0] / p[2];
+      T yp = - focal * p[1] / p[2];
+
+      // Apply second and fourth order radial distortion.
+      T r2 = xp*xp + yp*yp;
+      T distortion = T(1.0) + r2  * (l1 + l2  * r2);
+
+      residuals[0] = distortion * xp - u;
+      residuals[1] = distortion * yp - v;
+
+      return true;
+    }
+
+    double u;
+    double v;
+  };
+
+  Problem problem_;
+  Solver::Options options_;
+
+  int num_cameras_;
+  int num_points_;
+  int num_observations_;
+  int num_parameters_;
+
+  int* point_index_;
+  int* camera_index_;
+  double* observations_;
+  // The parameter vector is laid out as follows
+  // [camera_1, ..., camera_n, point_1, ..., point_m]
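+  // With this layout, camera i starts at parameters_ + 9 * i and point j
+  // starts at parameters_ + 9 * num_cameras_ + 3 * j (cf. mutable_cameras()
+  // and mutable_points() above).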
+  double* parameters_;
+};
+
+double BundleAdjustmentProblem::kResidualTolerance = 1e-4;
+typedef SystemTest<BundleAdjustmentProblem> BundleAdjustmentTest;
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/c_api.cc b/internal/ceres/c_api.cc
new file mode 100644
index 0000000..ada8f3e
--- /dev/null
+++ b/internal/ceres/c_api.cc
@@ -0,0 +1,188 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: mierle@gmail.com (Keir Mierle)
+//
+// An incomplete C API for Ceres.
+//
+// TODO(keir): Figure out why logging does not seem to work.
+
+#include "ceres/c_api.h"
+
+#include <vector>
+#include <iostream>
+#include <string>
+#include "ceres/cost_function.h"
+#include "ceres/loss_function.h"
+#include "ceres/problem.h"
+#include "ceres/solver.h"
+#include "ceres/types.h"  // for std
+#include "glog/logging.h"
+
+using ceres::Problem;
+
+void ceres_init() {
+  // This is not ideal, but it's not clear what to do if there is no gflags and
+  // no access to command line arguments.
+  char message[] = "<unknown>";
+  google::InitGoogleLogging(message);
+}
+
+ceres_problem_t* ceres_create_problem() {
+  return reinterpret_cast<ceres_problem_t*>(new Problem);
+}
+
+void ceres_free_problem(ceres_problem_t* problem) {
+  delete reinterpret_cast<Problem*>(problem);
+}
+
+// This cost function wraps a C-level function pointer from the user, to bridge
+// between C and C++.
+class CallbackCostFunction : public ceres::CostFunction {
+ public:
+  CallbackCostFunction(ceres_cost_function_t cost_function,
+                       void* user_data,
+                       int num_residuals,
+                       int num_parameter_blocks,
+                       int* parameter_block_sizes)
+      : cost_function_(cost_function),
+        user_data_(user_data) {
+    set_num_residuals(num_residuals);
+    for (int i = 0; i < num_parameter_blocks; ++i) {
+      mutable_parameter_block_sizes()->push_back(parameter_block_sizes[i]);
+    }
+  }
+
+  virtual ~CallbackCostFunction() {}
+
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** jacobians) const {
+    return (*cost_function_)(user_data_,
+                             const_cast<double**>(parameters),
+                             residuals,
+                             jacobians);
+  }
+
+ private:
+  ceres_cost_function_t cost_function_;
+  void* user_data_;
+};
+
+// This loss function wraps a C-level function pointer from the user, to bridge
+// between C and C++.
+class CallbackLossFunction : public ceres::LossFunction {
+ public:
+  explicit CallbackLossFunction(ceres_loss_function_t loss_function,
+                                void* user_data)
+    : loss_function_(loss_function), user_data_(user_data) {}
+  virtual void Evaluate(double sq_norm, double* rho) const {
+    (*loss_function_)(user_data_, sq_norm, rho);
+  }
+
+ private:
+  ceres_loss_function_t loss_function_;
+  void* user_data_;
+};
+
+// Wrappers for the stock loss functions.
+void* ceres_create_huber_loss_function_data(double a) {
+  return new ceres::HuberLoss(a);
+}
+void* ceres_create_softl1_loss_function_data(double a) {
+  return new ceres::SoftLOneLoss(a);
+}
+void* ceres_create_cauchy_loss_function_data(double a) {
+  return new ceres::CauchyLoss(a);
+}
+void* ceres_create_arctan_loss_function_data(double a) {
+  return new ceres::ArctanLoss(a);
+}
+void* ceres_create_tolerant_loss_function_data(double a, double b) {
+  return new ceres::TolerantLoss(a, b);
+}
+
+void ceres_free_stock_loss_function_data(void* loss_function_data) {
+  delete reinterpret_cast<ceres::LossFunction*>(loss_function_data);
+}
+
+void ceres_stock_loss_function(void* user_data,
+                               double squared_norm,
+                               double out[3]) {
+  reinterpret_cast<ceres::LossFunction*>(user_data)
+      ->Evaluate(squared_norm, out);
+}
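+
+// A usage sketch from the C side (illustrative; see c_api_test.cc for a
+// complete example):
+//
+//   void* cauchy_data = ceres_create_cauchy_loss_function_data(2.0);
+//   ceres_problem_add_residual_block(problem, cost_fn, cost_fn_data,
+//                                    ceres_stock_loss_function, cauchy_data,
+//                                    num_residuals, num_parameter_blocks,
+//                                    parameter_block_sizes, parameters);
+//   ...
+//   ceres_free_stock_loss_function_data(cauchy_data);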
+
+ceres_residual_block_id_t* ceres_problem_add_residual_block(
+    ceres_problem_t* problem,
+    ceres_cost_function_t cost_function,
+    void* cost_function_data,
+    ceres_loss_function_t loss_function,
+    void* loss_function_data,
+    int num_residuals,
+    int num_parameter_blocks,
+    int* parameter_block_sizes,
+    double** parameters) {
+  Problem* ceres_problem = reinterpret_cast<Problem*>(problem);
+
+  ceres::CostFunction* callback_cost_function =
+      new CallbackCostFunction(cost_function,
+                               cost_function_data,
+                               num_residuals,
+                               num_parameter_blocks,
+                               parameter_block_sizes);
+
+  ceres::LossFunction* callback_loss_function = NULL;
+  if (loss_function != NULL) {
+    callback_loss_function = new CallbackLossFunction(loss_function,
+                                                      loss_function_data);
+  }
+
+  std::vector<double*> parameter_blocks(parameters,
+                                        parameters + num_parameter_blocks);
+  return reinterpret_cast<ceres_residual_block_id_t*>(
+      ceres_problem->AddResidualBlock(callback_cost_function,
+                                      callback_loss_function,
+                                      parameter_blocks));
+}
+
+void ceres_solve(ceres_problem_t* c_problem) {
+  Problem* problem = reinterpret_cast<Problem*>(c_problem);
+
+  // TODO(keir): Obviously, this way of setting options won't scale or last.
+  // Instead, figure out a way to specify some of the options without
+  // duplicating everything.
+  ceres::Solver::Options options;
+  options.max_num_iterations = 100;
+  options.linear_solver_type = ceres::DENSE_QR;
+  options.minimizer_progress_to_stdout = true;
+
+  ceres::Solver::Summary summary;
+  ceres::Solve(options, problem, &summary);
+  std::cout << summary.FullReport() << "\n";
+}
diff --git a/internal/ceres/c_api_test.cc b/internal/ceres/c_api_test.cc
new file mode 100644
index 0000000..95b727a
--- /dev/null
+++ b/internal/ceres/c_api_test.cc
@@ -0,0 +1,221 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: mierle@gmail.com (Keir Mierle)
+
+#include "ceres/c_api.h"
+
+#include <cmath>
+
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+// Duplicated from curve_fitting.cc.
+int num_observations = 67;
+double data[] = {
+  0.000000e+00, 1.133898e+00,
+  7.500000e-02, 1.334902e+00,
+  1.500000e-01, 1.213546e+00,
+  2.250000e-01, 1.252016e+00,
+  3.000000e-01, 1.392265e+00,
+  3.750000e-01, 1.314458e+00,
+  4.500000e-01, 1.472541e+00,
+  5.250000e-01, 1.536218e+00,
+  6.000000e-01, 1.355679e+00,
+  6.750000e-01, 1.463566e+00,
+  7.500000e-01, 1.490201e+00,
+  8.250000e-01, 1.658699e+00,
+  9.000000e-01, 1.067574e+00,
+  9.750000e-01, 1.464629e+00,
+  1.050000e+00, 1.402653e+00,
+  1.125000e+00, 1.713141e+00,
+  1.200000e+00, 1.527021e+00,
+  1.275000e+00, 1.702632e+00,
+  1.350000e+00, 1.423899e+00,
+  1.425000e+00, 1.543078e+00,
+  1.500000e+00, 1.664015e+00,
+  1.575000e+00, 1.732484e+00,
+  1.650000e+00, 1.543296e+00,
+  1.725000e+00, 1.959523e+00,
+  1.800000e+00, 1.685132e+00,
+  1.875000e+00, 1.951791e+00,
+  1.950000e+00, 2.095346e+00,
+  2.025000e+00, 2.361460e+00,
+  2.100000e+00, 2.169119e+00,
+  2.175000e+00, 2.061745e+00,
+  2.250000e+00, 2.178641e+00,
+  2.325000e+00, 2.104346e+00,
+  2.400000e+00, 2.584470e+00,
+  2.475000e+00, 1.914158e+00,
+  2.550000e+00, 2.368375e+00,
+  2.625000e+00, 2.686125e+00,
+  2.700000e+00, 2.712395e+00,
+  2.775000e+00, 2.499511e+00,
+  2.850000e+00, 2.558897e+00,
+  2.925000e+00, 2.309154e+00,
+  3.000000e+00, 2.869503e+00,
+  3.075000e+00, 3.116645e+00,
+  3.150000e+00, 3.094907e+00,
+  3.225000e+00, 2.471759e+00,
+  3.300000e+00, 3.017131e+00,
+  3.375000e+00, 3.232381e+00,
+  3.450000e+00, 2.944596e+00,
+  3.525000e+00, 3.385343e+00,
+  3.600000e+00, 3.199826e+00,
+  3.675000e+00, 3.423039e+00,
+  3.750000e+00, 3.621552e+00,
+  3.825000e+00, 3.559255e+00,
+  3.900000e+00, 3.530713e+00,
+  3.975000e+00, 3.561766e+00,
+  4.050000e+00, 3.544574e+00,
+  4.125000e+00, 3.867945e+00,
+  4.200000e+00, 4.049776e+00,
+  4.275000e+00, 3.885601e+00,
+  4.350000e+00, 4.110505e+00,
+  4.425000e+00, 4.345320e+00,
+  4.500000e+00, 4.161241e+00,
+  4.575000e+00, 4.363407e+00,
+  4.650000e+00, 4.161576e+00,
+  4.725000e+00, 4.619728e+00,
+  4.800000e+00, 4.737410e+00,
+  4.875000e+00, 4.727863e+00,
+  4.950000e+00, 4.669206e+00,
+};
+
+// A test cost function, similar to the one in curve_fitting.c.
+int exponential_residual(void* user_data,
+                         double** parameters,
+                         double* residuals,
+                         double** jacobians) {
+  double* measurement = (double*) user_data;
+  double x = measurement[0];
+  double y = measurement[1];
+  double m = parameters[0][0];
+  double c = parameters[1][0];
+
+  residuals[0] = y - exp(m * x + c);
+  if (jacobians == NULL) {
+    return 1;
+  }
+  if (jacobians[0] != NULL) {
+    jacobians[0][0] = - x * exp(m * x + c);  // dr/dm
+  }
+  if (jacobians[1] != NULL) {
+    jacobians[1][0] =     - exp(m * x + c);  // dr/dc
+  }
+  return 1;
+}
+
+namespace ceres {
+namespace internal {
+
+TEST(C_API, SimpleEndToEndTest) {
+  double m = 0.0;
+  double c = 0.0;
+  double *parameter_pointers[] = { &m, &c };
+  int parameter_sizes[] = { 1, 1 };
+
+  ceres_problem_t* problem = ceres_create_problem();
+  for (int i = 0; i < num_observations; ++i) {
+    ceres_problem_add_residual_block(
+        problem,
+        exponential_residual,  // Cost function
+        &data[2 * i],          // Points to the (x,y) measurement
+        NULL,                  // Loss function
+        NULL,                  // Loss function user data
+        1,                     // Number of residuals
+        2,                     // Number of parameter blocks
+        parameter_sizes,
+        parameter_pointers);
+  }
+
+  ceres_solve(problem);
+
+  EXPECT_NEAR(0.3, m, 0.02);
+  EXPECT_NEAR(0.1, c, 0.04);
+
+  ceres_free_problem(problem);
+}
+
+template<typename T>
+class ScopedSetValue {
+ public:
+  ScopedSetValue(T* variable, T new_value)
+      : variable_(variable), old_value_(*variable) {
+    *variable = new_value;
+  }
+  ~ScopedSetValue() {
+    *variable_ = old_value_;
+  }
+
+ private:
+  T* variable_;
+  T old_value_;
+};
+
+TEST(C_API, LossFunctions) {
+  double m = 0.2;
+  double c = 0.03;
+  double *parameter_pointers[] = { &m, &c };
+  int parameter_sizes[] = { 1, 1 };
+
+  // Create two outliers, but be careful to leave the data intact.
+  ScopedSetValue<double> outlier1x(&data[12], 2.5);
+  ScopedSetValue<double> outlier1y(&data[13], 1.0e3);
+  ScopedSetValue<double> outlier2x(&data[14], 3.2);
+  ScopedSetValue<double> outlier2y(&data[15], 30e3);
+
+  // Create a Cauchy loss function and reuse it many times.
+  void* cauchy_loss_data =
+      ceres_create_cauchy_loss_function_data(5.0);
+
+  ceres_problem_t* problem = ceres_create_problem();
+  for (int i = 0; i < num_observations; ++i) {
+    ceres_problem_add_residual_block(
+        problem,
+        exponential_residual,  // Cost function
+        &data[2 * i],          // Points to the (x,y) measurement
+        ceres_stock_loss_function,
+        cauchy_loss_data,      // Loss function user data
+        1,                     // Number of residuals
+        2,                     // Number of parameter blocks
+        parameter_sizes,
+        parameter_pointers);
+  }
+
+  ceres_solve(problem);
+
+  EXPECT_NEAR(0.3, m, 0.02);
+  EXPECT_NEAR(0.1, c, 0.04);
+
+  ceres_free_stock_loss_function_data(cauchy_loss_data);
+  ceres_free_problem(problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/callbacks.cc b/internal/ceres/callbacks.cc
new file mode 100644
index 0000000..84576e4
--- /dev/null
+++ b/internal/ceres/callbacks.cc
@@ -0,0 +1,131 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <iostream>  // NOLINT
+#include "ceres/callbacks.h"
+#include "ceres/program.h"
+#include "ceres/stringprintf.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+using std::string;
+
+StateUpdatingCallback::StateUpdatingCallback(Program* program,
+                                             double* parameters)
+    : program_(program), parameters_(parameters) {}
+
+StateUpdatingCallback::~StateUpdatingCallback() {}
+
+CallbackReturnType StateUpdatingCallback::operator()(
+    const IterationSummary& summary) {
+  program_->StateVectorToParameterBlocks(parameters_);
+  program_->CopyParameterBlockStateToUserState();
+  return SOLVER_CONTINUE;
+}
+
+GradientProblemSolverStateUpdatingCallback::
+    GradientProblemSolverStateUpdatingCallback(
+        int num_parameters,
+        const double* internal_parameters,
+        double* user_parameters)
+    : num_parameters_(num_parameters),
+      internal_parameters_(internal_parameters),
+      user_parameters_(user_parameters) {}
+
+GradientProblemSolverStateUpdatingCallback::
+    ~GradientProblemSolverStateUpdatingCallback() {}
+
+CallbackReturnType GradientProblemSolverStateUpdatingCallback::operator()(
+    const IterationSummary& summary) {
+  if (summary.step_is_successful) {
+    std::copy(internal_parameters_,
+              internal_parameters_ + num_parameters_,
+              user_parameters_);
+  }
+  return SOLVER_CONTINUE;
+}
+
+LoggingCallback::LoggingCallback(const MinimizerType minimizer_type,
+                                 const bool log_to_stdout)
+    : minimizer_type(minimizer_type),
+      log_to_stdout_(log_to_stdout) {}
+
+LoggingCallback::~LoggingCallback() {}
+
+CallbackReturnType LoggingCallback::operator()(
+    const IterationSummary& summary) {
+  string output;
+  if (minimizer_type == LINE_SEARCH) {
+    const char* kReportRowFormat =
+        "% 4d: f:% 8e d:% 3.2e g:% 3.2e h:% 3.2e "
+        "s:% 3.2e e:% 3d it:% 3.2e tt:% 3.2e";
+    output = StringPrintf(kReportRowFormat,
+                          summary.iteration,
+                          summary.cost,
+                          summary.cost_change,
+                          summary.gradient_max_norm,
+                          summary.step_norm,
+                          summary.step_size,
+                          summary.line_search_function_evaluations,
+                          summary.iteration_time_in_seconds,
+                          summary.cumulative_time_in_seconds);
+  } else if (minimizer_type == TRUST_REGION) {
+    if (summary.iteration == 0) {
+      output = "iter      cost      cost_change  |gradient|   |step|    tr_ratio  tr_radius  ls_iter  iter_time  total_time\n";  // NOLINT
+    }
+    const char* kReportRowFormat =
+        "% 4d % 8e   % 3.2e   % 3.2e  % 3.2e  % 3.2e % 3.2e     % 4d   % 3.2e   % 3.2e";  // NOLINT
+    output += StringPrintf(kReportRowFormat,
+                           summary.iteration,
+                           summary.cost,
+                           summary.cost_change,
+                           summary.gradient_max_norm,
+                           summary.step_norm,
+                           summary.relative_decrease,
+                           summary.trust_region_radius,
+                           summary.linear_solver_iterations,
+                           summary.iteration_time_in_seconds,
+                           summary.cumulative_time_in_seconds);
+  } else {
+    LOG(FATAL) << "Unknown minimizer type.";
+  }
+
+  if (log_to_stdout_) {
+    std::cout << output << std::endl;
+  } else {
+    VLOG(1) << output;
+  }
+  return SOLVER_CONTINUE;
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/callbacks.h b/internal/ceres/callbacks.h
new file mode 100644
index 0000000..288d6ae
--- /dev/null
+++ b/internal/ceres/callbacks.h
@@ -0,0 +1,86 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_CALLBACKS_H_
+#define CERES_INTERNAL_CALLBACKS_H_
+
+#include <string>
+#include "ceres/iteration_callback.h"
+#include "ceres/internal/port.h"
+
+namespace ceres {
+namespace internal {
+
+class Program;
+
+// Callback for updating the externally visible state of parameter
+// blocks.
+class StateUpdatingCallback : public IterationCallback {
+ public:
+  StateUpdatingCallback(Program* program, double* parameters);
+  virtual ~StateUpdatingCallback();
+  virtual CallbackReturnType operator()(const IterationSummary& summary);
+ private:
+  Program* program_;
+  double* parameters_;
+};
+
+// Callback for updating the externally visible state of the
+// parameters vector for GradientProblemSolver.
+class GradientProblemSolverStateUpdatingCallback : public IterationCallback {
+ public:
+  GradientProblemSolverStateUpdatingCallback(int num_parameters,
+                                             const double* internal_parameters,
+                                             double* user_parameters);
+  virtual ~GradientProblemSolverStateUpdatingCallback();
+  virtual CallbackReturnType operator()(const IterationSummary& summary);
+ private:
+  int num_parameters_;
+  const double* internal_parameters_;
+  double* user_parameters_;
+};
+
+// Callback for logging the state of the minimizer to STDERR or
+// STDOUT depending on the user's preferences and logging level.
+class LoggingCallback : public IterationCallback {
+ public:
+  LoggingCallback(MinimizerType minimizer_type, bool log_to_stdout);
+  virtual ~LoggingCallback();
+  virtual CallbackReturnType operator()(const IterationSummary& summary);
+
+ private:
+  const MinimizerType minimizer_type;
+  const bool log_to_stdout_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_CALLBACKS_H_
diff --git a/internal/ceres/canonical_views_clustering.cc b/internal/ceres/canonical_views_clustering.cc
new file mode 100644
index 0000000..b2fd49f
--- /dev/null
+++ b/internal/ceres/canonical_views_clustering.cc
@@ -0,0 +1,232 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: David Gallup (dgallup@google.com)
+//         Sameer Agarwal (sameeragarwal@google.com)
+
+#include "ceres/canonical_views_clustering.h"
+
+#include <unordered_set>
+#include <unordered_map>
+
+#include "ceres/graph.h"
+#include "ceres/map_util.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+using std::vector;
+
+typedef std::unordered_map<int, int> IntMap;
+typedef std::unordered_set<int> IntSet;
+
+class CanonicalViewsClustering {
+ public:
+  CanonicalViewsClustering() {}
+
+  // Compute the canonical views clustering of the vertices of the
+  // graph. centers will contain the vertices that are identified as
+  // the canonical views/cluster centers, and membership is a map
+  // from vertices to cluster_ids. The i^th cluster center corresponds
+  // to the i^th cluster. It is possible depending on the
+  // configuration of the clustering algorithm that some of the
+  // vertices may not be assigned to any cluster. In this case they
+  // are assigned to a cluster with id = kInvalidClusterId.
+  void ComputeClustering(const CanonicalViewsClusteringOptions& options,
+                         const WeightedGraph<int>& graph,
+                         vector<int>* centers,
+                         IntMap* membership);
+
+ private:
+  void FindValidViews(IntSet* valid_views) const;
+  double ComputeClusteringQualityDifference(const int candidate,
+                                            const vector<int>& centers) const;
+  void UpdateCanonicalViewAssignments(const int canonical_view);
+  void ComputeClusterMembership(const vector<int>& centers,
+                                IntMap* membership) const;
+
+  CanonicalViewsClusteringOptions options_;
+  const WeightedGraph<int>* graph_;
+  // Maps a view to its representative canonical view (its cluster
+  // center).
+  IntMap view_to_canonical_view_;
+  // Maps a view to its similarity to its current cluster center.
+  std::unordered_map<int, double> view_to_canonical_view_similarity_;
+};
+
+void ComputeCanonicalViewsClustering(
+    const CanonicalViewsClusteringOptions& options,
+    const WeightedGraph<int>& graph,
+    vector<int>* centers,
+    IntMap* membership) {
+  time_t start_time = time(NULL);
+  CanonicalViewsClustering cv;
+  cv.ComputeClustering(options, graph, centers, membership);
+  VLOG(2) << "Canonical views clustering time (secs): "
+          << time(NULL) - start_time;
+}
+
+// Implementation of CanonicalViewsClustering
+void CanonicalViewsClustering::ComputeClustering(
+    const CanonicalViewsClusteringOptions& options,
+    const WeightedGraph<int>& graph,
+    vector<int>* centers,
+    IntMap* membership) {
+  options_ = options;
+  CHECK(centers != nullptr);
+  CHECK(membership != nullptr);
+  centers->clear();
+  membership->clear();
+  graph_ = &graph;
+
+  IntSet valid_views;
+  FindValidViews(&valid_views);
+  while (valid_views.size() > 0) {
+    // Find the next best canonical view.
+    double best_difference = -std::numeric_limits<double>::max();
+    int best_view = 0;
+
+    // TODO(sameeragarwal): Make this loop multi-threaded.
+    for (const auto& view : valid_views) {
+      const double difference =
+          ComputeClusteringQualityDifference(view, *centers);
+      if (difference > best_difference) {
+        best_difference = difference;
+        best_view = view;
+      }
+    }
+
+    CHECK_GT(best_difference, -std::numeric_limits<double>::max());
+
+    // Add the canonical view if the quality improves, or if the minimum
+    // number of views has not yet been met; otherwise break.
+    if ((best_difference <= 0) &&
+        (centers->size() >= options_.min_views)) {
+      break;
+    }
+
+    centers->push_back(best_view);
+    valid_views.erase(best_view);
+    UpdateCanonicalViewAssignments(best_view);
+  }
+
+  ComputeClusterMembership(*centers, membership);
+}
+
+// Return the set of vertices of the graph which have valid vertex
+// weights.
+void CanonicalViewsClustering::FindValidViews(
+    IntSet* valid_views) const {
+  const IntSet& views = graph_->vertices();
+  for (const auto& view : views) {
+    if (graph_->VertexWeight(view) != WeightedGraph<int>::InvalidWeight()) {
+      valid_views->insert(view);
+    }
+  }
+}
+
+// Computes the difference in the quality score if 'candidate' were
+// added to the set of canonical views.
+double CanonicalViewsClustering::ComputeClusteringQualityDifference(
+    const int candidate,
+    const vector<int>& centers) const {
+  // View score.
+  double difference =
+      options_.view_score_weight * graph_->VertexWeight(candidate);
+
+  // Compute how much the quality score changes if the candidate view
+  // was added to the list of canonical views and its nearest
+  // neighbors became members of its cluster.
+  const IntSet& neighbors = graph_->Neighbors(candidate);
+  for (const auto& neighbor : neighbors) {
+    const double old_similarity =
+        FindWithDefault(view_to_canonical_view_similarity_, neighbor, 0.0);
+    const double new_similarity = graph_->EdgeWeight(neighbor, candidate);
+    if (new_similarity > old_similarity) {
+      difference += new_similarity - old_similarity;
+    }
+  }
+
+  // Number of views penalty.
+  difference -= options_.size_penalty_weight;
+
+  // Orthogonality.
+  for (int i = 0; i < centers.size(); ++i) {
+    difference -= options_.similarity_penalty_weight *
+        graph_->EdgeWeight(centers[i], candidate);
+  }
+
+  return difference;
+}
+
+// Reassign views if they're more similar to the new canonical view.
+void CanonicalViewsClustering::UpdateCanonicalViewAssignments(
+    const int canonical_view) {
+  const IntSet& neighbors = graph_->Neighbors(canonical_view);
+  for (const auto& neighbor : neighbors) {
+    const double old_similarity =
+        FindWithDefault(view_to_canonical_view_similarity_, neighbor, 0.0);
+    const double new_similarity =
+        graph_->EdgeWeight(neighbor, canonical_view);
+    if (new_similarity > old_similarity) {
+      view_to_canonical_view_[neighbor] = canonical_view;
+      view_to_canonical_view_similarity_[neighbor] = new_similarity;
+    }
+  }
+}
+
+// Assign a cluster id to each view.
+void CanonicalViewsClustering::ComputeClusterMembership(
+    const vector<int>& centers,
+    IntMap* membership) const {
+  CHECK(membership != nullptr);
+  membership->clear();
+
+  // The i^th cluster has cluster id i.
+  IntMap center_to_cluster_id;
+  for (int i = 0; i < centers.size(); ++i) {
+    center_to_cluster_id[centers[i]] = i;
+  }
+
+  static const int kInvalidClusterId = -1;
+
+  const IntSet& views = graph_->vertices();
+  for (const auto& view : views) {
+    auto it = view_to_canonical_view_.find(view);
+    int cluster_id = kInvalidClusterId;
+    if (it != view_to_canonical_view_.end()) {
+      cluster_id = FindOrDie(center_to_cluster_id, it->second);
+    }
+
+    InsertOrDie(membership, view, cluster_id);
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/canonical_views_clustering.h b/internal/ceres/canonical_views_clustering.h
new file mode 100644
index 0000000..630adfe
--- /dev/null
+++ b/internal/ceres/canonical_views_clustering.h
@@ -0,0 +1,124 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// An implementation of the Canonical Views clustering algorithm from
+// "Scene Summarization for Online Image Collections", Ian Simon, Noah
+// Snavely, Steven M. Seitz, ICCV 2007.
+//
+// More details can be found at
+// http://grail.cs.washington.edu/projects/canonview/
+//
+// Ceres uses this algorithm to perform view clustering for
+// constructing visibility based preconditioners.
+
+#ifndef CERES_INTERNAL_CANONICAL_VIEWS_CLUSTERING_H_
+#define CERES_INTERNAL_CANONICAL_VIEWS_CLUSTERING_H_
+
+#include <unordered_map>
+#include <vector>
+
+#include "ceres/graph.h"
+
+namespace ceres {
+namespace internal {
+
+struct CanonicalViewsClusteringOptions;
+
+// Compute a partitioning of the vertices of the graph using the
+// canonical views clustering algorithm.
+//
+// In the following we will use the terms vertices and views
+// interchangeably.  Given a weighted Graph G(V,E), the canonical views
+// of G are the set of vertices that best "summarize" the content
+// of the graph. If w_ij is the weight connecting vertex i to
+// vertex j, and C is the set of canonical views, then the objective
+// of the canonical views algorithm is
+//
+//   E[C] = sum_[i in V] max_[j in C] w_ij
+//          - size_penalty_weight * |C|
+//          - similarity_penalty_weight * sum_[i in C, j in C, j > i] w_ij
+//
+// size_penalty_weight is the size penalty that penalizes a large
+// number of canonical views.
+//
+// similarity_penalty_weight is the similarity penalty that penalizes
+// canonical views that are too similar to other canonical views.
+//
+// Thus the canonical views algorithm tries to find a canonical view
+// for each vertex in the graph which best explains it, while trying
+// to minimize the number of canonical views and the overlap between
+// them.
+//
+// We further augment the above objective function by allowing for per
+// vertex weights, higher weights indicating a higher preference for
+// being chosen as a canonical view. Thus if w_i is the vertex weight
+// for vertex i, the objective function is then
+//
+//   E[C] = sum_[i in V] max_[j in C] w_ij
+//          - size_penalty_weight * |C|
+//          - similarity_penalty_weight * sum_[i in C, j in C, j > i] w_ij
+//          + view_score_weight * sum_[i in C] w_i
+//
+// centers will contain the vertices that are the identified
+// as the canonical views/cluster centers, and membership is a map
+// from vertices to cluster_ids. The i^th cluster center corresponds
+// to the i^th cluster.
+//
+// Depending on the configuration of the clustering algorithm, some
+// of the vertices may not be assigned to any cluster. In this case
+// they are assigned to a cluster with id = -1.
+void ComputeCanonicalViewsClustering(
+    const CanonicalViewsClusteringOptions& options,
+    const WeightedGraph<int>& graph,
+    std::vector<int>* centers,
+    std::unordered_map<int, int>* membership);
+
+struct CanonicalViewsClusteringOptions {
+  // The minimum number of canonical views to compute.
+  int min_views = 3;
+
+  // Penalty weight for the number of canonical views.  A higher
+  // number will result in fewer canonical views.
+  double size_penalty_weight = 5.75;
+
+  // Penalty weight for the diversity (orthogonality) of the
+  // canonical views.  A higher number will encourage less similar
+  // canonical views.
+  double similarity_penalty_weight = 100;
+
+  // Weight for per-view scores.  Lower weight places less
+  // confidence in the view scores.
+  double view_score_weight = 0.0;
+};
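+
+// Example usage (an illustrative sketch; the vertex ids, vertex weights
+// and edge weights below are made up and mirror the pattern in
+// canonical_views_clustering_test.cc):
+//
+//   WeightedGraph<int> graph;
+//   graph.AddVertex(0, 1.0);   // vertex id, vertex weight
+//   graph.AddVertex(1, 2.0);
+//   graph.AddEdge(0, 0, 1.0);  // every view must "see" itself
+//   graph.AddEdge(1, 1, 1.0);
+//   graph.AddEdge(0, 1, 0.8);  // similarity between views 0 and 1
+//
+//   CanonicalViewsClusteringOptions options;
+//   std::vector<int> centers;
+//   std::unordered_map<int, int> membership;
+//   ComputeCanonicalViewsClustering(options, graph, &centers, &membership);
+//
+// centers then holds the chosen canonical views and membership maps
+// every vertex to its cluster id (-1 if the vertex was left unassigned).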
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_CANONICAL_VIEWS_CLUSTERING_H_
diff --git a/internal/ceres/canonical_views_clustering_test.cc b/internal/ceres/canonical_views_clustering_test.cc
new file mode 100644
index 0000000..a8db293
--- /dev/null
+++ b/internal/ceres/canonical_views_clustering_test.cc
@@ -0,0 +1,143 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Sameer Agarwal (sameeragarwal@google.com)
+//         David Gallup (dgallup@google.com)
+
+#include "ceres/canonical_views_clustering.h"
+
+#include <unordered_map>
+#include "ceres/graph.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+const int kVertexIds[] = {0, 1, 2, 3};
+class CanonicalViewsTest : public ::testing::Test {
+ protected:
+  virtual void SetUp() {
+    // The graph structure is as follows.
+    //
+    // Vertex weights:   0      2      2     -1
+    //                   V0-----V1-----V2-----V3
+    // Edge weights:        0.8    0.9    0.3
+    const double kVertexWeights[] = {0.0, 2.0, 2.0, -1.0};
+    for (int i = 0; i < 4; ++i) {
+      graph_.AddVertex(i, kVertexWeights[i]);
+    }
+    // Create self edges.
+    // CanonicalViews requires that every view "sees" itself.
+    for (int i = 0; i < 4; ++i) {
+      graph_.AddEdge(i, i, 1.0);
+    }
+
+    // Create three edges.
+    const double kEdgeWeights[] = {0.8, 0.9, 0.3};
+    for (int i = 0; i < 3; ++i) {
+      // WeightedGraph stores each edge in both directions, so a
+      // single AddEdge call per pair of views suffices.
+      graph_.AddEdge(kVertexIds[i], kVertexIds[i + 1], kEdgeWeights[i]);
+    }
+  }
+
+  void ComputeClustering() {
+    ComputeCanonicalViewsClustering(options_, graph_, &centers_, &membership_);
+  }
+
+  WeightedGraph<int> graph_;
+
+  CanonicalViewsClusteringOptions options_;
+  std::vector<int> centers_;
+  std::unordered_map<int, int> membership_;
+};
+
+TEST_F(CanonicalViewsTest, ComputeCanonicalViewsTest) {
+  options_.min_views = 0;
+  options_.size_penalty_weight = 0.5;
+  options_.similarity_penalty_weight = 0.0;
+  options_.view_score_weight = 0.0;
+  ComputeClustering();
+
+  // 2 canonical views.
+  EXPECT_EQ(centers_.size(), 2);
+  EXPECT_EQ(centers_[0], kVertexIds[1]);
+  EXPECT_EQ(centers_[1], kVertexIds[3]);
+
+  // Check cluster membership.
+  EXPECT_EQ(FindOrDie(membership_, kVertexIds[0]), 0);
+  EXPECT_EQ(FindOrDie(membership_, kVertexIds[1]), 0);
+  EXPECT_EQ(FindOrDie(membership_, kVertexIds[2]), 0);
+  EXPECT_EQ(FindOrDie(membership_, kVertexIds[3]), 1);
+}
+
+// Increases size penalty so the second canonical view won't be
+// chosen.
+TEST_F(CanonicalViewsTest, SizePenaltyTest) {
+  options_.min_views = 0;
+  options_.size_penalty_weight = 2.0;
+  options_.similarity_penalty_weight = 0.0;
+  options_.view_score_weight = 0.0;
+  ComputeClustering();
+
+  // 1 canonical view.
+  EXPECT_EQ(centers_.size(), 1);
+  EXPECT_EQ(centers_[0], kVertexIds[1]);
+}
+
+// Increases view score weight so vertex 2 will be chosen.
+TEST_F(CanonicalViewsTest, ViewScoreTest) {
+  options_.min_views = 0;
+  options_.size_penalty_weight = 0.5;
+  options_.similarity_penalty_weight = 0.0;
+  options_.view_score_weight = 1.0;
+  ComputeClustering();
+
+  // 2 canonical views.
+  EXPECT_EQ(centers_.size(), 2);
+  EXPECT_EQ(centers_[0], kVertexIds[1]);
+  EXPECT_EQ(centers_[1], kVertexIds[2]);
+}
+
+// Increases similarity penalty so vertex 2 won't be chosen despite
+// its view score.
+TEST_F(CanonicalViewsTest, SimilarityPenaltyTest) {
+  options_.min_views = 0;
+  options_.size_penalty_weight = 0.5;
+  options_.similarity_penalty_weight = 3.0;
+  options_.view_score_weight = 1.0;
+  ComputeClustering();
+
+  // 1 canonical view.
+  EXPECT_EQ(centers_.size(), 1);
+  EXPECT_EQ(centers_[0], kVertexIds[1]);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/casts.h b/internal/ceres/casts.h
new file mode 100644
index 0000000..f18fdea
--- /dev/null
+++ b/internal/ceres/casts.h
@@ -0,0 +1,108 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#ifndef CERES_INTERNAL_CASTS_H_
+#define CERES_INTERNAL_CASTS_H_
+
+#include <cassert>
+#include <cstddef>  // For NULL.
+
+namespace ceres {
+
+// Identity metafunction.
+template <class T>
+struct identity_ {
+  typedef T type;
+};
+
+// Use implicit_cast as a safe version of static_cast or const_cast
+// for implicit conversions. For example:
+// - Upcasting in a type hierarchy.
+// - Performing arithmetic conversions (int32 to int64, int to double, etc.).
+// - Adding const or volatile qualifiers.
+//
+// In general, implicit_cast can be used to convert this code
+//   To to = from;
+//   DoSomething(to);
+// to this
+//   DoSomething(implicit_cast<To>(from));
+//
+// base::identity_ is used to make a non-deduced context, which
+// forces all callers to explicitly specify the template argument.
+template<typename To>
+inline To implicit_cast(typename identity_<To>::type to) {
+  return to;
+}
+
+// This version of implicit_cast is used when two template arguments
+// are specified. It's obsolete and should not be used.
+template<typename To, typename From>
+inline To implicit_cast(typename identity_<From>::type const &f) {
+  return f;
+}
+
+// When you upcast (that is, cast a pointer from type Foo to type
+// SuperclassOfFoo), it's fine to use implicit_cast<>, since upcasts
+// always succeed.  When you downcast (that is, cast a pointer from
+// type Foo to type SubclassOfFoo), static_cast<> isn't safe, because
+// how do you know the pointer is really of type SubclassOfFoo?  It
+// could be a bare Foo, or of type DifferentSubclassOfFoo.  Thus,
+// when you downcast, you should use this macro.  In debug mode, we
+// use dynamic_cast<> to double-check the downcast is legal (we die
+// if it's not).  In normal mode, we do the efficient static_cast<>
+// instead.  Thus, it's important to test in debug mode to make sure
+// the cast is legal!
+//    This is the only place in the code we should use dynamic_cast<>.
+// In particular, you SHOULDN'T be using dynamic_cast<> in order to
+// do RTTI (e.g. code like this:
+//    if (dynamic_cast<Subclass1>(foo)) HandleASubclass1Object(foo);
+//    if (dynamic_cast<Subclass2>(foo)) HandleASubclass2Object(foo);
+// ). You should design the code some other way not to need this.
+
+template<typename To, typename From>     // use like this: down_cast<T*>(foo);
+inline To down_cast(From* f) {                   // so we only accept pointers
+  // Ensures that To is a sub-type of From *.  This test is here only
+  // for compile-time type checking, and has no overhead in an
+  // optimized build at run-time, as it will be optimized away
+  // completely.
+
+  // TODO(csilvers): This should use COMPILE_ASSERT.
+  if (false) {
+    implicit_cast<From*, To>(NULL);
+  }
+
+  // uses RTTI in dbg and fastbuild. asserts are disabled in opt builds.
+  assert(f == NULL || dynamic_cast<To>(f) != NULL);  // NOLINT
+  return static_cast<To>(f);
+}
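+
+// A hypothetical usage sketch (Foo and Bar are made-up types, not part
+// of Ceres):
+//
+//   struct Foo { virtual ~Foo() {} };
+//   struct Bar : public Foo {};
+//
+//   Foo* foo = new Bar;
+//   Bar* bar = down_cast<Bar*>(foo);  // dynamic_cast-checked in debug builds.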
+
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_CASTS_H_
diff --git a/internal/ceres/cgnr_linear_operator.h b/internal/ceres/cgnr_linear_operator.h
new file mode 100644
index 0000000..ad0c627
--- /dev/null
+++ b/internal/ceres/cgnr_linear_operator.h
@@ -0,0 +1,120 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#ifndef CERES_INTERNAL_CGNR_LINEAR_OPERATOR_H_
+#define CERES_INTERNAL_CGNR_LINEAR_OPERATOR_H_
+
+#include <algorithm>
+#include <memory>
+#include "ceres/linear_operator.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+class SparseMatrix;
+
+// A linear operator which takes a matrix A and a diagonal vector D and
+// performs products of the form
+//
+//   (A^T A + D^T D)x
+//
+// This is used to implement iterative general sparse linear solving with
+// conjugate gradients, where A is the Jacobian and D is a regularizing
+// parameter. A brief proof that D^T D is the correct regularizer:
+//
+// Given a regularized least squares problem:
+//
+//   min  ||Ax - b||^2 + ||Dx||^2
+//    x
+//
+// First expand into matrix notation:
+//
+//   (Ax - b)^T (Ax - b) + x^T D^T D x
+//
+// Then multiply out to get:
+//
+//   = x^T A^T A x - 2 b^T A x + b^T b + x^T D^T D x
+//
+// Take the derivative with respect to x and set it to zero:
+//
+//   0 = 2 A^T A x - 2 A^T b + 2 D^T D x
+//   0 = A^T A x - A^T b + D^T D x
+//   0 = (A^T A + D^T D) x - A^T b
+//
+// Thus, the symmetric system we need to solve for CGNR is
+//
+//   Sx = z
+//
+// with S = A^TA + D^TD
+//  and z = A^T b
+//
+// Note: This class is not thread safe, since it uses some temporary storage.
+class CgnrLinearOperator : public LinearOperator {
+ public:
+  CgnrLinearOperator(const LinearOperator& A, const double *D)
+      : A_(A), D_(D), z_(new double[A.num_rows()]) {
+  }
+  virtual ~CgnrLinearOperator() {}
+
+  virtual void RightMultiply(const double* x, double* y) const {
+    std::fill(z_.get(), z_.get() + A_.num_rows(), 0.0);
+
+    // z = Ax
+    A_.RightMultiply(x, z_.get());
+
+    // y = y + Atz
+    A_.LeftMultiply(z_.get(), y);
+
+    // y = y + DtDx
+    if (D_ != NULL) {
+      int n = A_.num_cols();
+      VectorRef(y, n).array() += ConstVectorRef(D_, n).array().square() *
+                                 ConstVectorRef(x, n).array();
+    }
+  }
+
+  virtual void LeftMultiply(const double* x, double* y) const {
+    RightMultiply(x, y);
+  }
+
+  virtual int num_rows() const { return A_.num_cols(); }
+  virtual int num_cols() const { return A_.num_cols(); }
+
+ private:
+  const LinearOperator& A_;
+  const double* D_;
+  std::unique_ptr<double[]> z_;
+};
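+
+// Usage sketch (condensed from CgnrSolver::SolveImpl in cgnr_solver.cc;
+// assumes A, b, D, x, options and per_solve_options are already set up):
+//
+//   Vector z(A->num_cols());
+//   z.setZero();
+//   A->LeftMultiply(b, z.data());   // z = A^T b
+//
+//   CgnrLinearOperator lhs(*A, D);  // applies (A^T A + D^T D)
+//   ConjugateGradientsSolver cg(options);
+//   cg.Solve(&lhs, z.data(), per_solve_options, x);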
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_CGNR_LINEAR_OPERATOR_H_
diff --git a/internal/ceres/cgnr_solver.cc b/internal/ceres/cgnr_solver.cc
new file mode 100644
index 0000000..463fbbd
--- /dev/null
+++ b/internal/ceres/cgnr_solver.cc
@@ -0,0 +1,89 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include "ceres/cgnr_solver.h"
+
+#include "ceres/block_jacobi_preconditioner.h"
+#include "ceres/cgnr_linear_operator.h"
+#include "ceres/conjugate_gradients_solver.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/linear_solver.h"
+#include "ceres/wall_time.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+CgnrSolver::CgnrSolver(const LinearSolver::Options& options)
+  : options_(options) {
+  if (options_.preconditioner_type != JACOBI &&
+      options_.preconditioner_type != IDENTITY) {
+    LOG(FATAL) << "CGNR only supports IDENTITY and JACOBI preconditioners.";
+  }
+}
+
+CgnrSolver::~CgnrSolver() {}
+
+LinearSolver::Summary CgnrSolver::SolveImpl(
+    BlockSparseMatrix* A,
+    const double* b,
+    const LinearSolver::PerSolveOptions& per_solve_options,
+    double* x) {
+  EventLogger event_logger("CgnrSolver::Solve");
+
+  // Form z = Atb.
+  Vector z(A->num_cols());
+  z.setZero();
+  A->LeftMultiply(b, z.data());
+
+  // Precondition if necessary.
+  LinearSolver::PerSolveOptions cg_per_solve_options = per_solve_options;
+  if (options_.preconditioner_type == JACOBI) {
+    if (preconditioner_.get() == NULL) {
+      preconditioner_.reset(new BlockJacobiPreconditioner(*A));
+    }
+    preconditioner_->Update(*A, per_solve_options.D);
+    cg_per_solve_options.preconditioner = preconditioner_.get();
+  }
+
+  // Solve (AtA + DtD)x = z (= Atb).
+  VectorRef(x, A->num_cols()).setZero();
+  CgnrLinearOperator lhs(*A, per_solve_options.D);
+  event_logger.AddEvent("Setup");
+
+  ConjugateGradientsSolver conjugate_gradient_solver(options_);
+  LinearSolver::Summary summary =
+      conjugate_gradient_solver.Solve(&lhs, z.data(), cg_per_solve_options, x);
+  event_logger.AddEvent("Solve");
+  return summary;
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/cgnr_solver.h b/internal/ceres/cgnr_solver.h
new file mode 100644
index 0000000..0bd1883
--- /dev/null
+++ b/internal/ceres/cgnr_solver.h
@@ -0,0 +1,72 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#ifndef CERES_INTERNAL_CGNR_SOLVER_H_
+#define CERES_INTERNAL_CGNR_SOLVER_H_
+
+#include <memory>
+#include "ceres/linear_solver.h"
+
+namespace ceres {
+namespace internal {
+
+class Preconditioner;
+
+class BlockJacobiPreconditioner;
+
+// A conjugate gradients on the normal equations solver. This directly solves
+// for the solution to
+//
+//   (A^T A + D^T D)x = A^T b
+//
+// as required for solving for x in the least squares sense. Currently only
+// block diagonal preconditioning is supported.
+class CgnrSolver : public BlockSparseMatrixSolver {
+ public:
+  explicit CgnrSolver(const LinearSolver::Options& options);
+  CgnrSolver(const CgnrSolver&) = delete;
+  void operator=(const CgnrSolver&) = delete;
+  virtual ~CgnrSolver();
+
+  virtual Summary SolveImpl(
+      BlockSparseMatrix* A,
+      const double* b,
+      const LinearSolver::PerSolveOptions& per_solve_options,
+      double* x);
+
+ private:
+  const LinearSolver::Options options_;
+  std::unique_ptr<Preconditioner> preconditioner_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_CGNR_SOLVER_H_
diff --git a/internal/ceres/compressed_col_sparse_matrix_utils.cc b/internal/ceres/compressed_col_sparse_matrix_utils.cc
new file mode 100644
index 0000000..3f6672f
--- /dev/null
+++ b/internal/ceres/compressed_col_sparse_matrix_utils.cc
@@ -0,0 +1,124 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/compressed_col_sparse_matrix_utils.h"
+
+#include <algorithm>
+#include <vector>
+#include "ceres/internal/port.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+using std::vector;
+
+void CompressedColumnScalarMatrixToBlockMatrix(
+    const int* scalar_rows,
+    const int* scalar_cols,
+    const vector<int>& row_blocks,
+    const vector<int>& col_blocks,
+    vector<int>* block_rows,
+    vector<int>* block_cols) {
+  CHECK(block_rows != nullptr);
+  CHECK(block_cols != nullptr);
+  block_rows->clear();
+  block_cols->clear();
+  const int num_row_blocks = row_blocks.size();
+  const int num_col_blocks = col_blocks.size();
+
+  vector<int> row_block_starts(num_row_blocks);
+  for (int i = 0, cursor = 0; i < num_row_blocks; ++i) {
+    row_block_starts[i] = cursor;
+    cursor += row_blocks[i];
+  }
+
+  // This loop extracts the block sparsity of the scalar sparse matrix
+  // It does so by iterating over the columns, but only considering
+  // the columns corresponding to the first element of each column
+  // block. Within each column, the inner loop iterates over the rows,
+  // and detects the presence of a row block by checking for the
+  // presence of a non-zero entry corresponding to its first element.
+  block_cols->push_back(0);
+  int c = 0;
+  for (int col_block = 0; col_block < num_col_blocks; ++col_block) {
+    int column_size = 0;
+    for (int idx = scalar_cols[c]; idx < scalar_cols[c + 1]; ++idx) {
+      vector<int>::const_iterator it =
+          std::lower_bound(row_block_starts.begin(),
+                           row_block_starts.end(),
+                           scalar_rows[idx]);
+      // lower_bound returns an iterator to the first row block start
+      // that is >= scalar_rows[idx]. The two values match only when
+      // scalar_rows[idx] is the first row of a row block; all other
+      // rows can be skipped, since the first row is enough to detect
+      // the presence of the block.
+      //
+      // For all rows but the first one in the last row block,
+      // lower_bound returns row_block_starts.end(); those rows are
+      // skipped as well.
+      if (it == row_block_starts.end() || *it != scalar_rows[idx]) {
+        continue;
+      }
+
+      block_rows->push_back(it - row_block_starts.begin());
+      ++column_size;
+    }
+    block_cols->push_back(block_cols->back() + column_size);
+    c += col_blocks[col_block];
+  }
+}
+
+void BlockOrderingToScalarOrdering(const vector<int>& blocks,
+                                   const vector<int>& block_ordering,
+                                   vector<int>* scalar_ordering) {
+  CHECK_EQ(blocks.size(), block_ordering.size());
+  const int num_blocks = blocks.size();
+
+  // block_starts = [0, block1, block1 + block2 ..]
+  vector<int> block_starts(num_blocks);
+  for (int i = 0, cursor = 0; i < num_blocks ; ++i) {
+    block_starts[i] = cursor;
+    cursor += blocks[i];
+  }
+
+  scalar_ordering->resize(block_starts.back() + blocks.back());
+  int cursor = 0;
+  for (int i = 0; i < num_blocks; ++i) {
+    const int block_id = block_ordering[i];
+    const int block_size = blocks[block_id];
+    int block_position = block_starts[block_id];
+    for (int j = 0; j < block_size; ++j) {
+      (*scalar_ordering)[cursor++] = block_position++;
+    }
+  }
+}
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/compressed_col_sparse_matrix_utils.h b/internal/ceres/compressed_col_sparse_matrix_utils.h
new file mode 100644
index 0000000..da2109f
--- /dev/null
+++ b/internal/ceres/compressed_col_sparse_matrix_utils.h
@@ -0,0 +1,144 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_COMPRESSED_COL_SPARSE_MATRIX_UTILS_H_
+#define CERES_INTERNAL_COMPRESSED_COL_SPARSE_MATRIX_UTILS_H_
+
+#include <algorithm>
+#include <vector>
+#include "ceres/internal/port.h"
+
+namespace ceres {
+namespace internal {
+
+// Extract the block sparsity pattern of the scalar compressed columns
+// matrix and return it in compressed column form. The compressed
+// column form is stored in two vectors block_rows, and block_cols,
+// which correspond to the row and column arrays in a compressed
+// column sparse matrix.
+//
+// If c_ij is the block in the matrix A corresponding to row block i
+// and column block j, then it is expected that A contains at least
+// one non-zero entry corresponding to the top left entry of c_ij,
+// as that entry is used to detect the presence of a non-zero c_ij.
+void CompressedColumnScalarMatrixToBlockMatrix(
+    const int* scalar_rows,
+    const int* scalar_cols,
+    const std::vector<int>& row_blocks,
+    const std::vector<int>& col_blocks,
+    std::vector<int>* block_rows,
+    std::vector<int>* block_cols);
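+
+// For example (an illustrative 3x3 matrix; only the sparsity pattern
+// matters), with
+//
+//   row_blocks = {1, 2},  col_blocks = {2, 1},
+//   scalar_rows = {0, 0, 1, 2},  scalar_cols = {0, 1, 2, 4},
+//
+// i.e. blocks (0,0) and (1,1) are non-zero, the function produces
+//
+//   block_rows = {0, 1},  block_cols = {0, 1, 2}.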
+
+// Given a set of blocks and a permutation of these blocks, compute
+// the corresponding "scalar" ordering, where the scalar ordering of
+// size sum(blocks).
+void BlockOrderingToScalarOrdering(
+    const std::vector<int>& blocks,
+    const std::vector<int>& block_ordering,
+    std::vector<int>* scalar_ordering);
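+
+// For example (illustrative values), blocks = {1, 2} and
+// block_ordering = {1, 0} yield scalar_ordering = {1, 2, 0}: the two
+// scalar indices of block 1 come first, followed by the single scalar
+// index of block 0.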
+
+// Solve the linear system
+//
+//   R * solution = rhs
+//
+// where R is an upper triangular compressed column sparse matrix.
+template <typename IntegerType>
+void SolveUpperTriangularInPlace(IntegerType num_cols,
+                                 const IntegerType* rows,
+                                 const IntegerType* cols,
+                                 const double* values,
+                                 double* rhs_and_solution) {
+  for (IntegerType c = num_cols - 1; c >= 0; --c) {
+    rhs_and_solution[c] /= values[cols[c + 1] - 1];
+    for (IntegerType idx = cols[c]; idx < cols[c + 1] - 1; ++idx) {
+      const IntegerType r = rows[idx];
+      const double v = values[idx];
+      rhs_and_solution[r] -= v * rhs_and_solution[c];
+    }
+  }
+}
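+
+// For example (illustrative values), with the upper triangular matrix
+//
+//   R = [ 2  1 ]   stored as  rows = {0, 0, 1}, cols = {0, 1, 3},
+//       [ 0  4 ]              values = {2, 1, 4},
+//
+// SolveUpperTriangularInPlace<int>(2, rows, cols, values, b) overwrites
+// b = {4, 8} with the solution {1, 2}, since 4 * x1 = 8 and
+// 2 * x0 + 1 * x1 = 4.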
+
+// Solve the linear system
+//
+//   R' * solution = rhs
+//
+// where R is an upper triangular compressed column sparse matrix.
+template <typename IntegerType>
+void SolveUpperTriangularTransposeInPlace(IntegerType num_cols,
+                                          const IntegerType* rows,
+                                          const IntegerType* cols,
+                                          const double* values,
+                                          double* rhs_and_solution) {
+  for (IntegerType c = 0; c < num_cols; ++c) {
+    for (IntegerType idx = cols[c]; idx < cols[c + 1] - 1; ++idx) {
+      const IntegerType r = rows[idx];
+      const double v = values[idx];
+      rhs_and_solution[c] -= v * rhs_and_solution[r];
+    }
+    rhs_and_solution[c] /= values[cols[c + 1] - 1];
+  }
+}
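+
+// Continuing the example above (same R, illustrative values),
+// SolveUpperTriangularTransposeInPlace<int>(2, rows, cols, values, b)
+// overwrites b = {2, 9} with {1, 2}, since 2 * x0 = 2 and
+// 1 * x0 + 4 * x1 = 9.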
+
+// Given an upper triangular matrix R in compressed column form, solve
+// the linear system
+//
+//  R'R x = b
+//
+// where b is all zeros except at rhs_nonzero_index, where it is
+// equal to one.
+//
+// The function exploits this knowledge to reduce the number of
+// floating point operations.
+template <typename IntegerType>
+void SolveRTRWithSparseRHS(IntegerType num_cols,
+                           const IntegerType* rows,
+                           const IntegerType* cols,
+                           const double* values,
+                           const int rhs_nonzero_index,
+                           double* solution) {
+  std::fill(solution, solution + num_cols, 0.0);
+  solution[rhs_nonzero_index] = 1.0 / values[cols[rhs_nonzero_index + 1] - 1];
+
+  for (IntegerType c = rhs_nonzero_index + 1; c < num_cols; ++c) {
+    for (IntegerType idx = cols[c]; idx < cols[c + 1] - 1; ++idx) {
+      const IntegerType r = rows[idx];
+      if (r < rhs_nonzero_index) continue;
+      const double v = values[idx];
+      solution[c] -= v * solution[r];
+    }
+    solution[c] /= values[cols[c + 1] - 1];
+  }
+
+  SolveUpperTriangularInPlace(num_cols, rows, cols, values, solution);
+}
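+
+// Usage sketch (mirroring the pattern in
+// compressed_col_sparse_matrix_utils_test.cc): since the solution of
+// R'R x = e_i is the i^th column of (R'R)^-1, the inverse can be
+// recovered one column at a time:
+//
+//   std::vector<double> solution(num_cols);
+//   for (int i = 0; i < num_cols; ++i) {
+//     SolveRTRWithSparseRHS<int>(num_cols, rows, cols, values, i,
+//                                solution.data());
+//     // solution now holds the i^th column of (R'R)^-1.
+//   }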
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_COMPRESSED_COL_SPARSE_MATRIX_UTILS_H_
diff --git a/internal/ceres/compressed_col_sparse_matrix_utils_test.cc b/internal/ceres/compressed_col_sparse_matrix_utils_test.cc
new file mode 100644
index 0000000..2162b9f
--- /dev/null
+++ b/internal/ceres/compressed_col_sparse_matrix_utils_test.cc
@@ -0,0 +1,255 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+
+#include <algorithm>
+#include <numeric>
+#include "ceres/compressed_col_sparse_matrix_utils.h"
+#include "ceres/internal/port.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+#include "Eigen/SparseCore"
+
+namespace ceres {
+namespace internal {
+
+using std::vector;
+
+TEST(_, BlockPermutationToScalarPermutation) {
+  vector<int> blocks;
+  //  Block structure
+  //  0  --1-  ---2---  ---3---  4
+  // [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+  blocks.push_back(1);
+  blocks.push_back(2);
+  blocks.push_back(3);
+  blocks.push_back(3);
+  blocks.push_back(1);
+
+  // Block ordering
+  // [1, 0, 2, 4, 3]
+  vector<int> block_ordering;
+  block_ordering.push_back(1);
+  block_ordering.push_back(0);
+  block_ordering.push_back(2);
+  block_ordering.push_back(4);
+  block_ordering.push_back(3);
+
+  // Expected ordering
+  // [1, 2, 0, 3, 4, 5, 9, 6, 7, 8]
+  vector<int> expected_scalar_ordering;
+  expected_scalar_ordering.push_back(1);
+  expected_scalar_ordering.push_back(2);
+  expected_scalar_ordering.push_back(0);
+  expected_scalar_ordering.push_back(3);
+  expected_scalar_ordering.push_back(4);
+  expected_scalar_ordering.push_back(5);
+  expected_scalar_ordering.push_back(9);
+  expected_scalar_ordering.push_back(6);
+  expected_scalar_ordering.push_back(7);
+  expected_scalar_ordering.push_back(8);
+
+  vector<int> scalar_ordering;
+  BlockOrderingToScalarOrdering(blocks,
+                                block_ordering,
+                                &scalar_ordering);
+  EXPECT_EQ(scalar_ordering.size(), expected_scalar_ordering.size());
+  for (int i = 0; i < expected_scalar_ordering.size(); ++i) {
+    EXPECT_EQ(scalar_ordering[i], expected_scalar_ordering[i]);
+  }
+}
+
+void FillBlock(const vector<int>& row_blocks,
+               const vector<int>& col_blocks,
+               const int row_block_id,
+               const int col_block_id,
+               vector<Eigen::Triplet<double>>* triplets) {
+  const int row_offset =
+      std::accumulate(&row_blocks[0], &row_blocks[row_block_id], 0);
+  const int col_offset =
+      std::accumulate(&col_blocks[0], &col_blocks[col_block_id], 0);
+  for (int r = 0; r < row_blocks[row_block_id]; ++r) {
+    for (int c = 0; c < col_blocks[col_block_id]; ++c) {
+      triplets->push_back(
+          Eigen::Triplet<double>(row_offset + r, col_offset + c, 1.0));
+    }
+  }
+}
+
+TEST(_, ScalarMatrixToBlockMatrix) {
+  // Block sparsity.
+  //
+  //     [1 2 3 2]
+  // [1]  x   x
+  // [2]    x   x
+  // [2]  x x
+  // num_nonzeros = 1 + 3 + 4 + 4 + 2 + 4 = 18
+
+  vector<int> col_blocks;
+  col_blocks.push_back(1);
+  col_blocks.push_back(2);
+  col_blocks.push_back(3);
+  col_blocks.push_back(2);
+
+  vector<int> row_blocks;
+  row_blocks.push_back(1);
+  row_blocks.push_back(2);
+  row_blocks.push_back(2);
+
+  const int num_rows = std::accumulate(row_blocks.begin(), row_blocks.end(), 0);
+  const int num_cols = std::accumulate(col_blocks.begin(), col_blocks.end(), 0);
+
+  vector<Eigen::Triplet<double>> triplets;
+  FillBlock(row_blocks, col_blocks, 0, 0, &triplets);
+  FillBlock(row_blocks, col_blocks, 2, 0, &triplets);
+  FillBlock(row_blocks, col_blocks, 1, 1, &triplets);
+  FillBlock(row_blocks, col_blocks, 2, 1, &triplets);
+  FillBlock(row_blocks, col_blocks, 0, 2, &triplets);
+  FillBlock(row_blocks, col_blocks, 1, 3, &triplets);
+  Eigen::SparseMatrix<double> sparse_matrix(num_rows, num_cols);
+  sparse_matrix.setFromTriplets(triplets.begin(), triplets.end());
+
+  vector<int> expected_compressed_block_rows;
+  expected_compressed_block_rows.push_back(0);
+  expected_compressed_block_rows.push_back(2);
+  expected_compressed_block_rows.push_back(1);
+  expected_compressed_block_rows.push_back(2);
+  expected_compressed_block_rows.push_back(0);
+  expected_compressed_block_rows.push_back(1);
+
+  vector<int> expected_compressed_block_cols;
+  expected_compressed_block_cols.push_back(0);
+  expected_compressed_block_cols.push_back(2);
+  expected_compressed_block_cols.push_back(4);
+  expected_compressed_block_cols.push_back(5);
+  expected_compressed_block_cols.push_back(6);
+
+  vector<int> compressed_block_rows;
+  vector<int> compressed_block_cols;
+  CompressedColumnScalarMatrixToBlockMatrix(
+      sparse_matrix.innerIndexPtr(),
+      sparse_matrix.outerIndexPtr(),
+      row_blocks,
+      col_blocks,
+      &compressed_block_rows,
+      &compressed_block_cols);
+
+  EXPECT_EQ(compressed_block_rows, expected_compressed_block_rows);
+  EXPECT_EQ(compressed_block_cols, expected_compressed_block_cols);
+}
+
+class SolveUpperTriangularTest : public ::testing::Test {
+ protected:
+  void SetUp() {
+    cols.resize(5);
+    rows.resize(7);
+    values.resize(7);
+
+    cols[0] = 0;
+    rows[0] = 0;
+    values[0] = 0.50754;
+
+    cols[1] = 1;
+    rows[1] = 1;
+    values[1] = 0.80483;
+
+    cols[2] = 2;
+    rows[2] = 1;
+    values[2] = 0.14120;
+    rows[3] = 2;
+    values[3] = 0.3;
+
+    cols[3] = 4;
+    rows[4] = 0;
+    values[4] = 0.77696;
+    rows[5] = 1;
+    values[5] = 0.41860;
+    rows[6] = 3;
+    values[6] = 0.88979;
+
+    cols[4] = 7;
+  }
+
+  vector<int> cols;
+  vector<int> rows;
+  vector<double> values;
+};
+
+TEST_F(SolveUpperTriangularTest, SolveInPlace) {
+  double rhs_and_solution[] = {1.0, 1.0, 2.0, 2.0};
+  const double expected[] = { -1.4706, -1.0962, 6.6667, 2.2477};
+
+  SolveUpperTriangularInPlace<int>(cols.size() - 1,
+                                   &rows[0],
+                                   &cols[0],
+                                   &values[0],
+                                   rhs_and_solution);
+
+  for (int i = 0; i < 4; ++i) {
+    EXPECT_NEAR(rhs_and_solution[i], expected[i], 1e-4) << i;
+  }
+}
+
+TEST_F(SolveUpperTriangularTest, TransposeSolveInPlace) {
+  double rhs_and_solution[] = {1.0, 1.0, 2.0, 2.0};
+  double expected[] = {1.970288,  1.242498,  6.081864, -0.057255};
+
+  SolveUpperTriangularTransposeInPlace<int>(cols.size() - 1,
+                                            &rows[0],
+                                            &cols[0],
+                                            &values[0],
+                                            rhs_and_solution);
+
+  for (int i = 0; i < 4; ++i) {
+    EXPECT_NEAR(rhs_and_solution[i], expected[i], 1e-4) << i;
+  }
+}
+
+TEST_F(SolveUpperTriangularTest, RTRSolveWithSparseRHS) {
+  double solution[4];
+  double expected[] = { 6.8420e+00,   1.0057e+00,  -1.4907e-16,  -1.9335e+00,
+                        1.0057e+00,   2.2275e+00,  -1.9493e+00,  -6.5693e-01,
+                        -1.4907e-16,  -1.9493e+00,   1.1111e+01,   9.7381e-17,
+                        -1.9335e+00,  -6.5693e-01,   9.7381e-17,   1.2631e+00 };
+
+  for (int i = 0; i < 4; ++i) {
+    SolveRTRWithSparseRHS<int>(cols.size() - 1,
+                               &rows[0],
+                               &cols[0],
+                               &values[0],
+                               i,
+                               solution);
+    for (int j = 0; j < 4; ++j) {
+      EXPECT_NEAR(solution[j], expected[4 * i + j], 1e-3) << i;
+    }
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/compressed_row_jacobian_writer.cc b/internal/ceres/compressed_row_jacobian_writer.cc
new file mode 100644
index 0000000..1fc0116
--- /dev/null
+++ b/internal/ceres/compressed_row_jacobian_writer.cc
@@ -0,0 +1,245 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include "ceres/compressed_row_jacobian_writer.h"
+
+#include <iterator>
+#include <utility>
+#include <vector>
+
+#include "ceres/casts.h"
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/parameter_block.h"
+#include "ceres/program.h"
+#include "ceres/residual_block.h"
+#include "ceres/scratch_evaluate_preparer.h"
+
+namespace ceres {
+namespace internal {
+
+using std::make_pair;
+using std::pair;
+using std::vector;
+using std::adjacent_find;
+
+void CompressedRowJacobianWriter::PopulateJacobianRowAndColumnBlockVectors(
+    const Program* program, CompressedRowSparseMatrix* jacobian) {
+  const vector<ParameterBlock*>& parameter_blocks =
+      program->parameter_blocks();
+  vector<int>& col_blocks = *(jacobian->mutable_col_blocks());
+  col_blocks.resize(parameter_blocks.size());
+  for (int i = 0; i < parameter_blocks.size(); ++i) {
+    col_blocks[i] = parameter_blocks[i]->LocalSize();
+  }
+
+  const vector<ResidualBlock*>& residual_blocks =
+      program->residual_blocks();
+  vector<int>& row_blocks = *(jacobian->mutable_row_blocks());
+  row_blocks.resize(residual_blocks.size());
+  for (int i = 0; i < residual_blocks.size(); ++i) {
+    row_blocks[i] = residual_blocks[i]->NumResiduals();
+  }
+}
+
+void CompressedRowJacobianWriter::GetOrderedParameterBlocks(
+      const Program* program,
+      int residual_id,
+      vector<pair<int, int>>* evaluated_jacobian_blocks) {
+  const ResidualBlock* residual_block =
+      program->residual_blocks()[residual_id];
+  const int num_parameter_blocks = residual_block->NumParameterBlocks();
+
+  for (int j = 0; j < num_parameter_blocks; ++j) {
+    const ParameterBlock* parameter_block =
+        residual_block->parameter_blocks()[j];
+    if (!parameter_block->IsConstant()) {
+      evaluated_jacobian_blocks->push_back(
+          make_pair(parameter_block->index(), j));
+    }
+  }
+  sort(evaluated_jacobian_blocks->begin(), evaluated_jacobian_blocks->end());
+}
+
+SparseMatrix* CompressedRowJacobianWriter::CreateJacobian() const {
+  const vector<ResidualBlock*>& residual_blocks =
+      program_->residual_blocks();
+
+  int total_num_residuals = program_->NumResiduals();
+  int total_num_effective_parameters = program_->NumEffectiveParameters();
+
+  // Count the number of jacobian nonzeros.
+  int num_jacobian_nonzeros = 0;
+  for (int i = 0; i < residual_blocks.size(); ++i) {
+    ResidualBlock* residual_block = residual_blocks[i];
+    const int num_residuals = residual_block->NumResiduals();
+    const int num_parameter_blocks = residual_block->NumParameterBlocks();
+    for (int j = 0; j < num_parameter_blocks; ++j) {
+      ParameterBlock* parameter_block = residual_block->parameter_blocks()[j];
+      if (!parameter_block->IsConstant()) {
+        num_jacobian_nonzeros += num_residuals * parameter_block->LocalSize();
+      }
+    }
+  }
+
+  // Allocate storage for the jacobian with some extra space at the end, so
+  // that when the LM algorithm appends its diagonal no reallocation is
+  // necessary. This reduces peak memory usage significantly.
+  CompressedRowSparseMatrix* jacobian =
+      new CompressedRowSparseMatrix(
+          total_num_residuals,
+          total_num_effective_parameters,
+          num_jacobian_nonzeros + total_num_effective_parameters);
+
+  // At this stage, the CompressedRowSparseMatrix is in an invalid state. But
+  // this seems to be the only way to construct it without doing a memory copy.
+  int* rows = jacobian->mutable_rows();
+  int* cols = jacobian->mutable_cols();
+
+  int row_pos = 0;
+  rows[0] = 0;
+  for (int i = 0; i < residual_blocks.size(); ++i) {
+    const ResidualBlock* residual_block = residual_blocks[i];
+    const int num_parameter_blocks = residual_block->NumParameterBlocks();
+
+    // Count the number of derivatives for a row of this residual block and
+    // build a list of active parameter block indices.
+    int num_derivatives = 0;
+    vector<int> parameter_indices;
+    for (int j = 0; j < num_parameter_blocks; ++j) {
+      ParameterBlock* parameter_block = residual_block->parameter_blocks()[j];
+      if (!parameter_block->IsConstant()) {
+        parameter_indices.push_back(parameter_block->index());
+        num_derivatives += parameter_block->LocalSize();
+      }
+    }
+
+    // Sort the parameters by their position in the state vector.
+    sort(parameter_indices.begin(), parameter_indices.end());
+    if (adjacent_find(parameter_indices.begin(), parameter_indices.end()) !=
+        parameter_indices.end()) {
+      std::string parameter_block_description;
+      for (int j = 0; j < num_parameter_blocks; ++j) {
+        ParameterBlock* parameter_block = residual_block->parameter_blocks()[j];
+        parameter_block_description +=
+            parameter_block->ToString() + "\n";
+      }
+      LOG(FATAL) << "Ceres internal error: "
+                 << "Duplicate parameter blocks detected in a cost function. "
+                 << "This should never happen. Please report this to "
+                 << "the Ceres developers.\n"
+                 << "Residual Block: " << residual_block->ToString() << "\n"
+                 << "Parameter Blocks: " << parameter_block_description;
+    }
+
+    // Update the row indices.
+    const int num_residuals = residual_block->NumResiduals();
+    for (int j = 0; j < num_residuals; ++j) {
+      rows[row_pos + j + 1] = rows[row_pos + j] + num_derivatives;
+    }
+
+    // Iterate over parameter blocks in the order in which they occur in the
+    // parameter vector. This code mirrors that in Write(), where jacobian
+    // values are updated.
+    int col_pos = 0;
+    for (int j = 0; j < parameter_indices.size(); ++j) {
+      ParameterBlock* parameter_block =
+          program_->parameter_blocks()[parameter_indices[j]];
+      const int parameter_block_size = parameter_block->LocalSize();
+
+      for (int r = 0; r < num_residuals; ++r) {
+        // This is the position in the values array of the jacobian where this
+        // row of the jacobian block should go.
+        const int column_block_begin = rows[row_pos + r] + col_pos;
+
+        for (int c = 0; c < parameter_block_size; ++c) {
+          cols[column_block_begin + c] = parameter_block->delta_offset() + c;
+        }
+      }
+      col_pos += parameter_block_size;
+    }
+    row_pos += num_residuals;
+  }
+  CHECK_EQ(num_jacobian_nonzeros, rows[total_num_residuals]);
+
+  PopulateJacobianRowAndColumnBlockVectors(program_, jacobian);
+
+  return jacobian;
+}
+
+void CompressedRowJacobianWriter::Write(int residual_id,
+                                        int residual_offset,
+                                        double **jacobians,
+                                        SparseMatrix* base_jacobian) {
+  CompressedRowSparseMatrix* jacobian =
+      down_cast<CompressedRowSparseMatrix*>(base_jacobian);
+
+  double* jacobian_values = jacobian->mutable_values();
+  const int* jacobian_rows = jacobian->rows();
+
+  const ResidualBlock* residual_block =
+      program_->residual_blocks()[residual_id];
+  const int num_residuals = residual_block->NumResiduals();
+
+  vector<pair<int, int>> evaluated_jacobian_blocks;
+  GetOrderedParameterBlocks(program_, residual_id, &evaluated_jacobian_blocks);
+
+  // Where in the current row does the jacobian for a parameter block begin.
+  int col_pos = 0;
+
+  // Iterate over the jacobian blocks in increasing order of their
+  // positions in the reduced parameter vector.
+  for (int i = 0; i < evaluated_jacobian_blocks.size(); ++i) {
+    const ParameterBlock* parameter_block =
+        program_->parameter_blocks()[evaluated_jacobian_blocks[i].first];
+    const int argument = evaluated_jacobian_blocks[i].second;
+    const int parameter_block_size = parameter_block->LocalSize();
+
+    // Copy one row of the jacobian block at a time.
+    for (int r = 0; r < num_residuals; ++r) {
+      // Position of the r^th row of the current jacobian block.
+      const double* block_row_begin =
+          jacobians[argument] + r * parameter_block_size;
+
+      // Position in the values array of the jacobian where this
+      // row of the jacobian block should go.
+      double* column_block_begin =
+          jacobian_values + jacobian_rows[residual_offset + r] + col_pos;
+
+      std::copy(block_row_begin,
+                block_row_begin + parameter_block_size,
+                column_block_begin);
+    }
+    col_pos += parameter_block_size;
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/compressed_row_jacobian_writer.h b/internal/ceres/compressed_row_jacobian_writer.h
new file mode 100644
index 0000000..9fb414e
--- /dev/null
+++ b/internal/ceres/compressed_row_jacobian_writer.h
@@ -0,0 +1,112 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// A jacobian writer that directly writes to compressed row sparse matrices.
+
+#ifndef CERES_INTERNAL_COMPRESSED_ROW_JACOBIAN_WRITER_H_
+#define CERES_INTERNAL_COMPRESSED_ROW_JACOBIAN_WRITER_H_
+
+#include <utility>
+#include <vector>
+
+#include "ceres/evaluator.h"
+#include "ceres/scratch_evaluate_preparer.h"
+
+namespace ceres {
+namespace internal {
+
+class CompressedRowSparseMatrix;
+class Program;
+class SparseMatrix;
+
+class CompressedRowJacobianWriter {
+ public:
+  CompressedRowJacobianWriter(Evaluator::Options /* ignored */,
+                              Program* program)
+    : program_(program) {
+  }
+
+  // PopulateJacobianRowAndColumnBlockVectors sets col_blocks and
+  // row_blocks for a CompressedRowSparseMatrix, based on the
+  // parameter block sizes and residual sizes respectively from the
+  // program. This is useful when Solver::Options::use_block_amd =
+  // true.
+  //
+  // This function is static so that it is available to other jacobian
+  // writers which use CompressedRowSparseMatrix (or derived types).
+  // (Jacobian writers do not fall under any type hierarchy; they only
+  // have to provide an interface as specified in program_evaluator.h).
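+  //
+  // For example (the block sizes here are illustrative assumptions,
+  // not taken from any particular program): if the program has
+  // residual blocks with 2 and 3 residuals and parameter blocks with
+  // local sizes 3 and 4, then after this call
+  //
+  //   jacobian->row_blocks() == {2, 3}
+  //   jacobian->col_blocks() == {3, 4}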
+  static void PopulateJacobianRowAndColumnBlockVectors(
+      const Program* program,
+      CompressedRowSparseMatrix* jacobian);
+
+  // It is necessary to determine the order of the jacobian blocks
+  // before copying them into a CompressedRowSparseMatrix (or derived
+  // type). The order in which a cost function takes its parameter
+  // blocks as arguments does not determine the order in which the
+  // corresponding jacobian blocks occur in the column layout of the
+  // jacobian. Thus,
+  // GetOrderedParameterBlocks determines the order by sorting the
+  // jacobian blocks by their position in the state vector.
+  //
+  // This function is static so that it is available to other jacobian
+  // writers which use CompressedRowSparseMatrix (or derived types).
+  // (Jacobian writers do not fall under any type hierarchy; they only
+  // have to provide an interface as specified in
+  // program_evaluator.h).
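+  //
+  // For example (a hypothetical residual block): if the cost function
+  // takes parameter blocks (p2, p0) as arguments, but p0 precedes p2
+  // in the parameter vector, then the output is
+  //
+  //   {{index(p0), 1}, {index(p2), 0}}
+  //
+  // where .first is a parameter block's position in the parameter
+  // vector and .second is its argument position in the cost function,
+  // which Write() uses to index into the jacobians array.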
+  static void GetOrderedParameterBlocks(
+      const Program* program,
+      int residual_id,
+      std::vector<std::pair<int, int>>* evaluated_jacobian_blocks);
+
+  // JacobianWriter interface.
+
+  // Since the compressed row matrix has a different layout from the one
+  // assumed by the cost functions, use scratch space to store the
+  // jacobians temporarily then copy them over to the larger jacobian
+  // in the Write() function.
+  ScratchEvaluatePreparer* CreateEvaluatePreparers(int num_threads) {
+    return ScratchEvaluatePreparer::Create(*program_, num_threads);
+  }
+
+  SparseMatrix* CreateJacobian() const;
+
+  void Write(int residual_id,
+             int residual_offset,
+             double **jacobians,
+             SparseMatrix* base_jacobian);
+
+ private:
+  Program* program_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_COMPRESSED_ROW_JACOBIAN_WRITER_H_
diff --git a/internal/ceres/compressed_row_sparse_matrix.cc b/internal/ceres/compressed_row_sparse_matrix.cc
new file mode 100644
index 0000000..e56de16
--- /dev/null
+++ b/internal/ceres/compressed_row_sparse_matrix.cc
@@ -0,0 +1,728 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/compressed_row_sparse_matrix.h"
+
+#include <algorithm>
+#include <numeric>
+#include <vector>
+#include "ceres/crs_matrix.h"
+#include "ceres/internal/port.h"
+#include "ceres/random.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+using std::vector;
+
+namespace {
+
+// Helper functor used by the constructor for reordering the contents
+// of a TripletSparseMatrix. This comparator assumes that there are no
+// duplicates in the pair of arrays rows and cols, i.e., there are no
+// two distinct indices i and j such that
+//
+//  rows[i] == rows[j] && cols[i] == cols[j]
+//
+// If duplicates are present, this functor will not be a StrictWeakOrdering.
+struct RowColLessThan {
+  RowColLessThan(const int* rows, const int* cols) : rows(rows), cols(cols) {}
+
+  bool operator()(const int x, const int y) const {
+    if (rows[x] == rows[y]) {
+      return (cols[x] < cols[y]);
+    }
+    return (rows[x] < rows[y]);
+  }
+
+  const int* rows;
+  const int* cols;
+};
+
+void TransposeForCompressedRowSparseStructure(const int num_rows,
+                                              const int num_cols,
+                                              const int num_nonzeros,
+                                              const int* rows,
+                                              const int* cols,
+                                              const double* values,
+                                              int* transpose_rows,
+                                              int* transpose_cols,
+                                              double* transpose_values) {
+  // Explicitly zero out transpose_rows.
+  std::fill(transpose_rows, transpose_rows + num_cols + 1, 0);
+
+  // Count the number of entries in each column of the original matrix
+  // and assign to transpose_rows[col + 1].
+  for (int idx = 0; idx < num_nonzeros; ++idx) {
+    ++transpose_rows[cols[idx] + 1];
+  }
+
+  // Compute the starting position for each row in the transpose by
+  // computing the cumulative sum of the entries of transpose_rows.
+  for (int i = 1; i < num_cols + 1; ++i) {
+    transpose_rows[i] += transpose_rows[i - 1];
+  }
+
+  // Populate transpose_cols and (optionally) transpose_values by
+  // walking the entries of the source matrix. For each entry that
+  // is added, the value of transpose_rows is incremented, allowing us
+  // to keep track of where the next entry for that row should go.
+  //
+  // As a result, transpose_rows is shifted to the left by one entry.
+  for (int r = 0; r < num_rows; ++r) {
+    for (int idx = rows[r]; idx < rows[r + 1]; ++idx) {
+      const int c = cols[idx];
+      const int transpose_idx = transpose_rows[c]++;
+      transpose_cols[transpose_idx] = r;
+      if (values != NULL && transpose_values != NULL) {
+        transpose_values[transpose_idx] = values[idx];
+      }
+    }
+  }
+
+  // This loop undoes the left shift to transpose_rows introduced by
+  // the previous loop.
+  for (int i = num_cols - 1; i > 0; --i) {
+    transpose_rows[i] = transpose_rows[i - 1];
+  }
+  transpose_rows[0] = 0;
+}
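+
+// A small worked example of the routine above (illustrative only).
+// For the 2x3 matrix
+//
+//   [ 1 0 2 ]
+//   [ 0 3 0 ]
+//
+// the compressed row storage is
+//
+//   rows = [0, 2, 3], cols = [0, 2, 1], values = [1, 2, 3],
+//
+// and the routine produces the storage of its 3x2 transpose:
+//
+//   transpose_rows   = [0, 1, 2, 3]
+//   transpose_cols   = [0, 1, 0]
+//   transpose_values = [1, 3, 2]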
+
+void AddRandomBlock(const int num_rows,
+                    const int num_cols,
+                    const int row_block_begin,
+                    const int col_block_begin,
+                    std::vector<int>* rows,
+                    std::vector<int>* cols,
+                    std::vector<double>* values) {
+  for (int r = 0; r < num_rows; ++r) {
+    for (int c = 0; c < num_cols; ++c) {
+      rows->push_back(row_block_begin + r);
+      cols->push_back(col_block_begin + c);
+      values->push_back(RandNormal());
+    }
+  }
+}
+
+void AddSymmetricRandomBlock(const int num_rows,
+                             const int row_block_begin,
+                             std::vector<int>* rows,
+                             std::vector<int>* cols,
+                             std::vector<double>* values) {
+  for (int r = 0; r < num_rows; ++r) {
+    for (int c = r; c < num_rows; ++c) {
+      const double v = RandNormal();
+      rows->push_back(row_block_begin + r);
+      cols->push_back(row_block_begin + c);
+      values->push_back(v);
+      if (r != c) {
+        rows->push_back(row_block_begin + c);
+        cols->push_back(row_block_begin + r);
+        values->push_back(v);
+      }
+    }
+  }
+}
+
+}  // namespace
+
+// This constructor gives you a semi-initialized CompressedRowSparseMatrix.
+CompressedRowSparseMatrix::CompressedRowSparseMatrix(int num_rows,
+                                                     int num_cols,
+                                                     int max_num_nonzeros) {
+  num_rows_ = num_rows;
+  num_cols_ = num_cols;
+  storage_type_ = UNSYMMETRIC;
+  rows_.resize(num_rows + 1, 0);
+  cols_.resize(max_num_nonzeros, 0);
+  values_.resize(max_num_nonzeros, 0.0);
+
+  VLOG(1) << "# of rows: " << num_rows_ << " # of columns: " << num_cols_
+          << " max_num_nonzeros: " << cols_.size() << ". Allocating "
+          << (num_rows_ + 1) * sizeof(int) +     // NOLINT
+                 cols_.size() * sizeof(int) +    // NOLINT
+                 cols_.size() * sizeof(double);  // NOLINT
+}
+
+CompressedRowSparseMatrix* CompressedRowSparseMatrix::FromTripletSparseMatrix(
+    const TripletSparseMatrix& input) {
+  return CompressedRowSparseMatrix::FromTripletSparseMatrix(input, false);
+}
+
+CompressedRowSparseMatrix*
+CompressedRowSparseMatrix::FromTripletSparseMatrixTransposed(
+    const TripletSparseMatrix& input) {
+  return CompressedRowSparseMatrix::FromTripletSparseMatrix(input, true);
+}
+
+CompressedRowSparseMatrix* CompressedRowSparseMatrix::FromTripletSparseMatrix(
+    const TripletSparseMatrix& input, bool transpose) {
+  int num_rows = input.num_rows();
+  int num_cols = input.num_cols();
+  const int* rows = input.rows();
+  const int* cols = input.cols();
+  const double* values = input.values();
+
+  if (transpose) {
+    std::swap(num_rows, num_cols);
+    std::swap(rows, cols);
+  }
+
+  // index is the list of indices into the TripletSparseMatrix input.
+  vector<int> index(input.num_nonzeros(), 0);
+  for (int i = 0; i < input.num_nonzeros(); ++i) {
+    index[i] = i;
+  }
+
+  // Sort index such that the entries of input are ordered by row and ties
+  // are broken by column.
+  std::sort(index.begin(), index.end(), RowColLessThan(rows, cols));
+
+  VLOG(1) << "# of rows: " << num_rows << " # of columns: " << num_cols
+          << " num_nonzeros: " << input.num_nonzeros() << ". Allocating "
+          << ((num_rows + 1) * sizeof(int) +           // NOLINT
+              input.num_nonzeros() * sizeof(int) +     // NOLINT
+              input.num_nonzeros() * sizeof(double));  // NOLINT
+
+  CompressedRowSparseMatrix* output =
+      new CompressedRowSparseMatrix(num_rows, num_cols, input.num_nonzeros());
+
+  if (num_rows == 0) {
+    // No data to copy.
+    return output;
+  }
+
+  // Copy the contents of the cols and values array in the order given
+  // by index and count the number of entries in each row.
+  int* output_rows = output->mutable_rows();
+  int* output_cols = output->mutable_cols();
+  double* output_values = output->mutable_values();
+
+  output_rows[0] = 0;
+  for (int i = 0; i < index.size(); ++i) {
+    const int idx = index[i];
+    ++output_rows[rows[idx] + 1];
+    output_cols[i] = cols[idx];
+    output_values[i] = values[idx];
+  }
+
+  // Find the cumulative sum of the row counts.
+  for (int i = 1; i < num_rows + 1; ++i) {
+    output_rows[i] += output_rows[i - 1];
+  }
+
+  CHECK_EQ(output->num_nonzeros(), input.num_nonzeros());
+  return output;
+}
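+
+// A small worked example of the conversion above (illustrative only):
+// for a 2x3 TripletSparseMatrix with entries
+//
+//   (row, col, value) = (1, 0, 4), (0, 2, 2), (0, 0, 1)
+//
+// the entries are ordered by row with ties broken by column, giving
+//
+//   rows = [0, 2, 3], cols = [0, 2, 0], values = [1, 2, 4].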
+
+CompressedRowSparseMatrix::CompressedRowSparseMatrix(const double* diagonal,
+                                                     int num_rows) {
+  CHECK(diagonal != nullptr);
+
+  num_rows_ = num_rows;
+  num_cols_ = num_rows;
+  storage_type_ = UNSYMMETRIC;
+  rows_.resize(num_rows + 1);
+  cols_.resize(num_rows);
+  values_.resize(num_rows);
+
+  rows_[0] = 0;
+  for (int i = 0; i < num_rows_; ++i) {
+    cols_[i] = i;
+    values_[i] = diagonal[i];
+    rows_[i + 1] = i + 1;
+  }
+
+  CHECK_EQ(num_nonzeros(), num_rows);
+}
+
+CompressedRowSparseMatrix::~CompressedRowSparseMatrix() {}
+
+void CompressedRowSparseMatrix::SetZero() {
+  std::fill(values_.begin(), values_.end(), 0);
+}
+
+// TODO(sameeragarwal): Make RightMultiply and LeftMultiply
+// block-aware for higher performance.
+void CompressedRowSparseMatrix::RightMultiply(const double* x,
+                                              double* y) const {
+  CHECK(x != nullptr);
+  CHECK(y != nullptr);
+
+  if (storage_type_ == UNSYMMETRIC) {
+    for (int r = 0; r < num_rows_; ++r) {
+      for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
+        const int c = cols_[idx];
+        const double v = values_[idx];
+        y[r] += v * x[c];
+      }
+    }
+  } else if (storage_type_ == UPPER_TRIANGULAR) {
+    // Because of their block structure, we will have entries that lie
+    // above (below) the diagonal for lower (upper) triangular matrices,
+    // so the loops below need to account for this.
+    for (int r = 0; r < num_rows_; ++r) {
+      int idx = rows_[r];
+      const int idx_end = rows_[r + 1];
+
+      // For upper triangular matrices r <= c, so skip entries with r
+      // > c.
+      while (idx < idx_end && r > cols_[idx]) {
+        ++idx;
+      }
+
+      for (; idx < idx_end; ++idx) {
+        const int c = cols_[idx];
+        const double v = values_[idx];
+        y[r] += v * x[c];
+        // Since we are only iterating over the upper triangular part
+        // of the matrix, add contributions for the strictly lower
+        // triangular part.
+        if (r != c) {
+          y[c] += v * x[r];
+        }
+      }
+    }
+  } else if (storage_type_ == LOWER_TRIANGULAR) {
+    for (int r = 0; r < num_rows_; ++r) {
+      int idx = rows_[r];
+      const int idx_end = rows_[r + 1];
+      // For lower triangular matrices, we only iterate while r >= c.
+      for (; idx < idx_end && r >= cols_[idx]; ++idx) {
+        const int c = cols_[idx];
+        const double v = values_[idx];
+        y[r] += v * x[c];
+        // Since we are only iterating over the lower triangular part
+        // of the matrix, add contributions for the strictly upper
+        // triangular part.
+        if (r != c) {
+          y[c] += v * x[r];
+        }
+      }
+    }
+  } else {
+    LOG(FATAL) << "Unknown storage type: " << storage_type_;
+  }
+}
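+
+// A minimal illustration of the symmetric storage handled above (the
+// values are illustrative, not from any particular problem): the matrix
+//
+//   [ 2 1 ]
+//   [ 1 3 ]
+//
+// stored as UPPER_TRIANGULAR has rows = [0, 2, 3], cols = [0, 1, 1],
+// values = [2, 1, 3]. For x = [1, 1], RightMultiply accumulates
+// y[0] += 2 + 1 and y[1] += 1 + 3, i.e. y = [3, 4], which matches the
+// full symmetric matrix-vector product.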
+
+void CompressedRowSparseMatrix::LeftMultiply(const double* x, double* y) const {
+  CHECK(x != nullptr);
+  CHECK(y != nullptr);
+
+  if (storage_type_ == UNSYMMETRIC) {
+    for (int r = 0; r < num_rows_; ++r) {
+      for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
+        y[cols_[idx]] += values_[idx] * x[r];
+      }
+    }
+  } else {
+    // Since the matrix is symmetric, LeftMultiply = RightMultiply.
+    RightMultiply(x, y);
+  }
+}
+
+void CompressedRowSparseMatrix::SquaredColumnNorm(double* x) const {
+  CHECK(x != nullptr);
+
+  std::fill(x, x + num_cols_, 0.0);
+  if (storage_type_ == UNSYMMETRIC) {
+    for (int idx = 0; idx < rows_[num_rows_]; ++idx) {
+      x[cols_[idx]] += values_[idx] * values_[idx];
+    }
+  } else if (storage_type_ == UPPER_TRIANGULAR) {
+    // Because of their block structure, we will have entries that lie
+    // above (below) the diagonal for lower (upper) triangular
+    // matrices, so the loops below need to account for this.
+    for (int r = 0; r < num_rows_; ++r) {
+      int idx = rows_[r];
+      const int idx_end = rows_[r + 1];
+
+      // For upper triangular matrices r <= c, so skip entries with r
+      // > c.
+      while (idx < idx_end && r > cols_[idx]) {
+        ++idx;
+      }
+
+      for (; idx < idx_end; ++idx) {
+        const int c = cols_[idx];
+        const double v2 = values_[idx] * values_[idx];
+        x[c] += v2;
+        // Since we are only iterating over the upper triangular part
+        // of the matrix, add contributions for the strictly lower
+        // triangular part.
+        if (r != c) {
+          x[r] += v2;
+        }
+      }
+    }
+  } else if (storage_type_ == LOWER_TRIANGULAR) {
+    for (int r = 0; r < num_rows_; ++r) {
+      int idx = rows_[r];
+      const int idx_end = rows_[r + 1];
+      // For lower triangular matrices, we only iterate while r >= c.
+      for (; idx < idx_end && r >= cols_[idx]; ++idx) {
+        const int c = cols_[idx];
+        const double v2 = values_[idx] * values_[idx];
+        x[c] += v2;
+        // Since we are only iterating over the lower triangular part
+        // of the matrix, add contributions for the strictly upper
+        // triangular part.
+        if (r != c) {
+          x[r] += v2;
+        }
+      }
+    }
+  } else {
+    LOG(FATAL) << "Unknown storage type: " << storage_type_;
+  }
+}
+
+void CompressedRowSparseMatrix::ScaleColumns(const double* scale) {
+  CHECK(scale != nullptr);
+
+  for (int idx = 0; idx < rows_[num_rows_]; ++idx) {
+    values_[idx] *= scale[cols_[idx]];
+  }
+}
+
+void CompressedRowSparseMatrix::ToDenseMatrix(Matrix* dense_matrix) const {
+  CHECK(dense_matrix != nullptr);
+  dense_matrix->resize(num_rows_, num_cols_);
+  dense_matrix->setZero();
+
+  for (int r = 0; r < num_rows_; ++r) {
+    for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
+      (*dense_matrix)(r, cols_[idx]) = values_[idx];
+    }
+  }
+}
+
+void CompressedRowSparseMatrix::DeleteRows(int delta_rows) {
+  CHECK_GE(delta_rows, 0);
+  CHECK_LE(delta_rows, num_rows_);
+  CHECK_EQ(storage_type_, UNSYMMETRIC);
+
+  num_rows_ -= delta_rows;
+  rows_.resize(num_rows_ + 1);
+
+  // The rest of the code updates the block information. Immediately
+  // return in case of no block information.
+  if (row_blocks_.empty()) {
+    return;
+  }
+
+  // Walk the list of row blocks until we reach the new number of rows
+  // and then drop the rest of the row blocks.
+  int num_row_blocks = 0;
+  int num_rows = 0;
+  while (num_row_blocks < row_blocks_.size() && num_rows < num_rows_) {
+    num_rows += row_blocks_[num_row_blocks];
+    ++num_row_blocks;
+  }
+
+  row_blocks_.resize(num_row_blocks);
+}
+
+void CompressedRowSparseMatrix::AppendRows(const CompressedRowSparseMatrix& m) {
+  CHECK_EQ(storage_type_, UNSYMMETRIC);
+  CHECK_EQ(m.num_cols(), num_cols_);
+
+  CHECK((row_blocks_.empty() && m.row_blocks().empty()) ||
+        (!row_blocks_.empty() && !m.row_blocks().empty()))
+      << "Cannot append a matrix with row blocks to one without and vice versa."
+      << "This matrix has : " << row_blocks_.size() << " row blocks."
+      << "The matrix being appended has: " << m.row_blocks().size()
+      << " row blocks.";
+
+  if (m.num_rows() == 0) {
+    return;
+  }
+
+  if (cols_.size() < num_nonzeros() + m.num_nonzeros()) {
+    cols_.resize(num_nonzeros() + m.num_nonzeros());
+    values_.resize(num_nonzeros() + m.num_nonzeros());
+  }
+
+  // Copy the contents of m into this matrix.
+  DCHECK_LT(num_nonzeros(), cols_.size());
+  if (m.num_nonzeros() > 0) {
+    std::copy(m.cols(), m.cols() + m.num_nonzeros(), &cols_[num_nonzeros()]);
+    std::copy(
+        m.values(), m.values() + m.num_nonzeros(), &values_[num_nonzeros()]);
+  }
+
+  rows_.resize(num_rows_ + m.num_rows() + 1);
+  // new_rows = [rows_, m.rows() + rows_[num_rows_]]
+  std::fill(rows_.begin() + num_rows_,
+            rows_.begin() + num_rows_ + m.num_rows() + 1,
+            rows_[num_rows_]);
+
+  for (int r = 0; r < m.num_rows() + 1; ++r) {
+    rows_[num_rows_ + r] += m.rows()[r];
+  }
+
+  num_rows_ += m.num_rows();
+
+  // The rest of the code updates the block information. Immediately
+  // return in case of no block information.
+  if (row_blocks_.empty()) {
+    return;
+  }
+
+  row_blocks_.insert(
+      row_blocks_.end(), m.row_blocks().begin(), m.row_blocks().end());
+}
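+
+// For example (illustrative only): appending a 1-row matrix with a
+// single nonzero to a 2-row matrix with rows_ = [0, 2, 3] yields
+// rows_ = [0, 2, 3, 4], with the appended matrix's cols and values
+// copied after the existing three entries.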
+
+void CompressedRowSparseMatrix::ToTextFile(FILE* file) const {
+  CHECK(file != nullptr);
+  for (int r = 0; r < num_rows_; ++r) {
+    for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
+      fprintf(file, "% 10d % 10d %17f\n", r, cols_[idx], values_[idx]);
+    }
+  }
+}
+
+void CompressedRowSparseMatrix::ToCRSMatrix(CRSMatrix* matrix) const {
+  matrix->num_rows = num_rows_;
+  matrix->num_cols = num_cols_;
+  matrix->rows = rows_;
+  matrix->cols = cols_;
+  matrix->values = values_;
+
+  // Trim.
+  matrix->rows.resize(matrix->num_rows + 1);
+  matrix->cols.resize(matrix->rows[matrix->num_rows]);
+  matrix->values.resize(matrix->rows[matrix->num_rows]);
+}
+
+void CompressedRowSparseMatrix::SetMaxNumNonZeros(int num_nonzeros) {
+  CHECK_GE(num_nonzeros, 0);
+
+  cols_.resize(num_nonzeros);
+  values_.resize(num_nonzeros);
+}
+
+CompressedRowSparseMatrix* CompressedRowSparseMatrix::CreateBlockDiagonalMatrix(
+    const double* diagonal, const vector<int>& blocks) {
+  int num_rows = 0;
+  int num_nonzeros = 0;
+  for (int i = 0; i < blocks.size(); ++i) {
+    num_rows += blocks[i];
+    num_nonzeros += blocks[i] * blocks[i];
+  }
+
+  CompressedRowSparseMatrix* matrix =
+      new CompressedRowSparseMatrix(num_rows, num_rows, num_nonzeros);
+
+  int* rows = matrix->mutable_rows();
+  int* cols = matrix->mutable_cols();
+  double* values = matrix->mutable_values();
+  std::fill(values, values + num_nonzeros, 0.0);
+
+  int idx_cursor = 0;
+  int col_cursor = 0;
+  for (int i = 0; i < blocks.size(); ++i) {
+    const int block_size = blocks[i];
+    for (int r = 0; r < block_size; ++r) {
+      *(rows++) = idx_cursor;
+      values[idx_cursor + r] = diagonal[col_cursor + r];
+      for (int c = 0; c < block_size; ++c, ++idx_cursor) {
+        *(cols++) = col_cursor + c;
+      }
+    }
+    col_cursor += block_size;
+  }
+  *rows = idx_cursor;
+
+  *matrix->mutable_row_blocks() = blocks;
+  *matrix->mutable_col_blocks() = blocks;
+
+  CHECK_EQ(idx_cursor, num_nonzeros);
+  CHECK_EQ(col_cursor, num_rows);
+  return matrix;
+}
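+
+// For example (illustrative only), CreateBlockDiagonalMatrix with
+// blocks = {1, 2} and diagonal = {1, 2, 3} produces the 3x3 matrix
+//
+//   [ 1 0 0 ]
+//   [ 0 2 0 ]
+//   [ 0 0 3 ]
+//
+// stored with explicit zeros inside each diagonal block:
+//
+//   rows = [0, 1, 3, 5], cols = [0, 1, 2, 1, 2], values = [1, 2, 0, 0, 3].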
+
+CompressedRowSparseMatrix* CompressedRowSparseMatrix::Transpose() const {
+  CompressedRowSparseMatrix* transpose =
+      new CompressedRowSparseMatrix(num_cols_, num_rows_, num_nonzeros());
+
+  switch (storage_type_) {
+    case UNSYMMETRIC:
+      transpose->set_storage_type(UNSYMMETRIC);
+      break;
+    case LOWER_TRIANGULAR:
+      transpose->set_storage_type(UPPER_TRIANGULAR);
+      break;
+    case UPPER_TRIANGULAR:
+      transpose->set_storage_type(LOWER_TRIANGULAR);
+      break;
+    default:
+      LOG(FATAL) << "Unknown storage type: " << storage_type_;
+  };
+
+  TransposeForCompressedRowSparseStructure(num_rows(),
+                                           num_cols(),
+                                           num_nonzeros(),
+                                           rows(),
+                                           cols(),
+                                           values(),
+                                           transpose->mutable_rows(),
+                                           transpose->mutable_cols(),
+                                           transpose->mutable_values());
+
+  // The rest of the code updates the block information. Immediately
+  // return in case of no block information.
+  if (row_blocks_.empty()) {
+    return transpose;
+  }
+
+  *(transpose->mutable_row_blocks()) = col_blocks_;
+  *(transpose->mutable_col_blocks()) = row_blocks_;
+  return transpose;
+}
+
+CompressedRowSparseMatrix* CompressedRowSparseMatrix::CreateRandomMatrix(
+    CompressedRowSparseMatrix::RandomMatrixOptions options) {
+  CHECK_GT(options.num_row_blocks, 0);
+  CHECK_GT(options.min_row_block_size, 0);
+  CHECK_GT(options.max_row_block_size, 0);
+  CHECK_LE(options.min_row_block_size, options.max_row_block_size);
+
+  if (options.storage_type == UNSYMMETRIC) {
+    CHECK_GT(options.num_col_blocks, 0);
+    CHECK_GT(options.min_col_block_size, 0);
+    CHECK_GT(options.max_col_block_size, 0);
+    CHECK_LE(options.min_col_block_size, options.max_col_block_size);
+  } else {
+    // Symmetric matrices (LOWER_TRIANGULAR or UPPER_TRIANGULAR).
+    options.num_col_blocks = options.num_row_blocks;
+    options.min_col_block_size = options.min_row_block_size;
+    options.max_col_block_size = options.max_row_block_size;
+  }
+
+  CHECK_GT(options.block_density, 0.0);
+  CHECK_LE(options.block_density, 1.0);
+
+  vector<int> row_blocks;
+  vector<int> col_blocks;
+
+  // Generate the row block structure.
+  for (int i = 0; i < options.num_row_blocks; ++i) {
+    // Generate a random integer in [min_row_block_size, max_row_block_size]
+    const int delta_block_size =
+        Uniform(options.max_row_block_size - options.min_row_block_size);
+    row_blocks.push_back(options.min_row_block_size + delta_block_size);
+  }
+
+  if (options.storage_type == UNSYMMETRIC) {
+    // Generate the col block structure.
+    for (int i = 0; i < options.num_col_blocks; ++i) {
+      // Generate a random integer in [min_col_block_size, max_col_block_size]
+      const int delta_block_size =
+          Uniform(options.max_col_block_size - options.min_col_block_size);
+      col_blocks.push_back(options.min_col_block_size + delta_block_size);
+    }
+  } else {
+    // Symmetric matrices (LOWER_TRIANGULAR or UPPER_TRIANGULAR).
+    col_blocks = row_blocks;
+  }
+
+  vector<int> tsm_rows;
+  vector<int> tsm_cols;
+  vector<double> tsm_values;
+
+  // For ease of construction, we are going to generate the
+  // CompressedRowSparseMatrix by generating it as a
+  // TripletSparseMatrix and then converting it to a
+  // CompressedRowSparseMatrix.
+
+  // It is possible that the random matrix is empty, which is likely
+  // not what the user wants, so repeat the matrix generation until we
+  // have at least one non-zero entry.
+  while (tsm_values.empty()) {
+    tsm_rows.clear();
+    tsm_cols.clear();
+    tsm_values.clear();
+
+    int row_block_begin = 0;
+    for (int r = 0; r < options.num_row_blocks; ++r) {
+      int col_block_begin = 0;
+      for (int c = 0; c < options.num_col_blocks; ++c) {
+        if (((options.storage_type == UPPER_TRIANGULAR) && (r > c)) ||
+            ((options.storage_type == LOWER_TRIANGULAR) && (r < c))) {
+          col_block_begin += col_blocks[c];
+          continue;
+        }
+
+        // Randomly determine if this block is present or not.
+        if (RandDouble() <= options.block_density) {
+          // If the matrix is symmetric, then we take care to generate
+          // symmetric diagonal blocks.
+          if (options.storage_type == UNSYMMETRIC || r != c) {
+            AddRandomBlock(row_blocks[r],
+                           col_blocks[c],
+                           row_block_begin,
+                           col_block_begin,
+                           &tsm_rows,
+                           &tsm_cols,
+                           &tsm_values);
+          } else {
+            AddSymmetricRandomBlock(row_blocks[r],
+                                    row_block_begin,
+                                    &tsm_rows,
+                                    &tsm_cols,
+                                    &tsm_values);
+          }
+        }
+        col_block_begin += col_blocks[c];
+      }
+      row_block_begin += row_blocks[r];
+    }
+  }
+
+  const int num_rows = std::accumulate(row_blocks.begin(), row_blocks.end(), 0);
+  const int num_cols = std::accumulate(col_blocks.begin(), col_blocks.end(), 0);
+  const bool kDoNotTranspose = false;
+  CompressedRowSparseMatrix* matrix =
+      CompressedRowSparseMatrix::FromTripletSparseMatrix(
+          TripletSparseMatrix(
+              num_rows, num_cols, tsm_rows, tsm_cols, tsm_values),
+          kDoNotTranspose);
+  (*matrix->mutable_row_blocks()) = row_blocks;
+  (*matrix->mutable_col_blocks()) = col_blocks;
+  matrix->set_storage_type(options.storage_type);
+  return matrix;
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/compressed_row_sparse_matrix.h b/internal/ceres/compressed_row_sparse_matrix.h
new file mode 100644
index 0000000..2b51b9b
--- /dev/null
+++ b/internal/ceres/compressed_row_sparse_matrix.h
@@ -0,0 +1,228 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_COMPRESSED_ROW_SPARSE_MATRIX_H_
+#define CERES_INTERNAL_COMPRESSED_ROW_SPARSE_MATRIX_H_
+
+#include <vector>
+#include "ceres/internal/port.h"
+#include "ceres/sparse_matrix.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+
+namespace ceres {
+
+struct CRSMatrix;
+
+namespace internal {
+
+class TripletSparseMatrix;
+
+class CompressedRowSparseMatrix : public SparseMatrix {
+ public:
+  enum StorageType {
+    UNSYMMETRIC,
+    // Matrix is assumed to be symmetric but only the lower triangular
+    // part of the matrix is stored.
+    LOWER_TRIANGULAR,
+    // Matrix is assumed to be symmetric but only the upper triangular
+    // part of the matrix is stored.
+    UPPER_TRIANGULAR
+  };
+
+  // Create a matrix with the same content as the TripletSparseMatrix
+  // input. We assume that input does not have any repeated
+  // entries.
+  //
+  // The storage type of the matrix is set to UNSYMMETRIC.
+  //
+  // Caller owns the result.
+  static CompressedRowSparseMatrix* FromTripletSparseMatrix(
+      const TripletSparseMatrix& input);
+
+  // Create a matrix with the same content as the TripletSparseMatrix
+  // input transposed. We assume that input does not have any repeated
+  // entries.
+  //
+  // The storage type of the matrix is set to UNSYMMETRIC.
+  //
+  // Caller owns the result.
+  static CompressedRowSparseMatrix* FromTripletSparseMatrixTransposed(
+      const TripletSparseMatrix& input);
+
+  // Use this constructor only if you know what you are doing. This
+  // creates a "blank" matrix with the appropriate amount of memory
+  // allocated. However, the object itself is in an inconsistent state
+  // as the rows and cols arrays do not match the values of
+  // num_rows, num_cols and max_num_nonzeros.
+  //
+  // The use case for this constructor is that when the user knows the
+  // size of the matrix to begin with and wants to update the layout
+  // manually, instead of going via the indirect route of first
+  // constructing a TripletSparseMatrix, which leads to more than
+  // double the peak memory usage.
+  //
+  // The storage type is set to UNSYMMETRIC.
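+  //
+  // A minimal usage sketch (illustrative only): build a 2x2 identity
+  // matrix by filling in the layout by hand.
+  //
+  //   CompressedRowSparseMatrix m(2, 2, 2);
+  //   int* rows = m.mutable_rows();
+  //   int* cols = m.mutable_cols();
+  //   double* values = m.mutable_values();
+  //   rows[0] = 0; rows[1] = 1; rows[2] = 2;
+  //   cols[0] = 0; cols[1] = 1;
+  //   values[0] = 1.0; values[1] = 1.0;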
+  CompressedRowSparseMatrix(int num_rows, int num_cols, int max_num_nonzeros);
+
+  // Build a square sparse diagonal matrix with num_rows rows and
+  // columns. The diagonal m(i,i) = diagonal(i).
+  //
+  // The storage type is set to UNSYMMETRIC.
+  CompressedRowSparseMatrix(const double* diagonal, int num_rows);
+
+  // SparseMatrix interface.
+  virtual ~CompressedRowSparseMatrix();
+  virtual void SetZero();
+  virtual void RightMultiply(const double* x, double* y) const;
+  virtual void LeftMultiply(const double* x, double* y) const;
+  virtual void SquaredColumnNorm(double* x) const;
+  virtual void ScaleColumns(const double* scale);
+  virtual void ToDenseMatrix(Matrix* dense_matrix) const;
+  virtual void ToTextFile(FILE* file) const;
+  virtual int num_rows() const { return num_rows_; }
+  virtual int num_cols() const { return num_cols_; }
+  virtual int num_nonzeros() const { return rows_[num_rows_]; }
+  virtual const double* values() const { return &values_[0]; }
+  virtual double* mutable_values() { return &values_[0]; }
+
+  // Delete the bottom delta_rows.
+  // num_rows -= delta_rows
+  void DeleteRows(int delta_rows);
+
+  // Append the contents of m to the bottom of this matrix. m must
+  // have the same number of columns as this matrix.
+  void AppendRows(const CompressedRowSparseMatrix& m);
+
+  void ToCRSMatrix(CRSMatrix* matrix) const;
+
+  CompressedRowSparseMatrix* Transpose() const;
+
+  // Destructive array resizing method.
+  void SetMaxNumNonZeros(int num_nonzeros);
+
+  // Non-destructive array resizing method.
+  void set_num_rows(const int num_rows) { num_rows_ = num_rows; }
+  void set_num_cols(const int num_cols) { num_cols_ = num_cols; }
+
+  // Low level access methods that expose the structure of the matrix.
+  const int* cols() const { return &cols_[0]; }
+  int* mutable_cols() { return &cols_[0]; }
+
+  const int* rows() const { return &rows_[0]; }
+  int* mutable_rows() { return &rows_[0]; }
+
+  const StorageType storage_type() const { return storage_type_; }
+  void set_storage_type(const StorageType storage_type) {
+    storage_type_ = storage_type;
+  }
+
+  const std::vector<int>& row_blocks() const { return row_blocks_; }
+  std::vector<int>* mutable_row_blocks() { return &row_blocks_; }
+
+  const std::vector<int>& col_blocks() const { return col_blocks_; }
+  std::vector<int>* mutable_col_blocks() { return &col_blocks_; }
+
+  // Create a block diagonal CompressedRowSparseMatrix with the given
+  // block structure. The individual blocks are assumed to be laid out
+  // contiguously in the diagonal array, one block at a time.
+  //
+  // Caller owns the result.
+  static CompressedRowSparseMatrix* CreateBlockDiagonalMatrix(
+      const double* diagonal, const std::vector<int>& blocks);
+
+  // Options struct to control the generation of random block sparse
+  // matrices in compressed row sparse format.
+  //
+  // The random matrix generation proceeds as follows.
+  //
+  // First the row and column block structure is determined by
+  // generating random row and column block sizes that lie within the
+  // given bounds.
+  //
+  // Then we walk the block structure of the resulting matrix, and with
+  // probability block_density determine whether each block is
+  // structurally zero or not. If the answer is no, then we generate
+  // normally distributed entries for the block.
+  struct RandomMatrixOptions {
+    // Type of matrix to create.
+    //
+    // If storage_type is UPPER_TRIANGULAR (LOWER_TRIANGULAR), then
+    // create a square symmetric matrix with just the upper triangular
+    // (lower triangular) part. In this case, num_col_blocks,
+    // min_col_block_size and max_col_block_size will be ignored and
+    // assumed to be equal to the corresponding row settings.
+    StorageType storage_type = UNSYMMETRIC;
+
+    int num_row_blocks = 0;
+    int min_row_block_size = 0;
+    int max_row_block_size = 0;
+    int num_col_blocks = 0;
+    int min_col_block_size = 0;
+    int max_col_block_size = 0;
+
+    // 0 < block_density <= 1 is the probability of a block being
+    // present in the matrix. A given random matrix will not have
+    // precisely this density.
+    double block_density = 0.0;
+  };
+
+  // Create a random CompressedRowSparseMatrix whose entries are
+  // normally distributed and whose structure is determined by
+  // RandomMatrixOptions.
+  //
+  // Caller owns the result.
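+  //
+  // A minimal usage sketch (the block sizes and density below are
+  // illustrative assumptions, not recommended defaults):
+  //
+  //   CompressedRowSparseMatrix::RandomMatrixOptions options;
+  //   options.num_row_blocks = 4;
+  //   options.min_row_block_size = 1;
+  //   options.max_row_block_size = 3;
+  //   options.num_col_blocks = 3;
+  //   options.min_col_block_size = 1;
+  //   options.max_col_block_size = 2;
+  //   options.block_density = 0.25;
+  //   std::unique_ptr<CompressedRowSparseMatrix> matrix(
+  //       CompressedRowSparseMatrix::CreateRandomMatrix(options));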
+  static CompressedRowSparseMatrix* CreateRandomMatrix(
+      RandomMatrixOptions options);
+
+ private:
+  static CompressedRowSparseMatrix* FromTripletSparseMatrix(
+      const TripletSparseMatrix& input, bool transpose);
+
+  int num_rows_;
+  int num_cols_;
+  std::vector<int> rows_;
+  std::vector<int> cols_;
+  std::vector<double> values_;
+  StorageType storage_type_;
+
+  // If the matrix has an underlying block structure, then it can also
+  // carry with it row and column block sizes. This is auxiliary and
+  // optional information for use by algorithms operating on the
+  // matrix. The class itself does not make use of this information in
+  // any way.
+  std::vector<int> row_blocks_;
+  std::vector<int> col_blocks_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_COMPRESSED_ROW_SPARSE_MATRIX_H_
diff --git a/internal/ceres/compressed_row_sparse_matrix_test.cc b/internal/ceres/compressed_row_sparse_matrix_test.cc
new file mode 100644
index 0000000..cf6e2e4
--- /dev/null
+++ b/internal/ceres/compressed_row_sparse_matrix_test.cc
@@ -0,0 +1,601 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/compressed_row_sparse_matrix.h"
+
+#include <memory>
+#include <numeric>
+#include "ceres/casts.h"
+#include "ceres/crs_matrix.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/linear_least_squares_problems.h"
+#include "ceres/random.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+#include "Eigen/SparseCore"
+
+namespace ceres {
+namespace internal {
+
+using std::vector;
+
+void CompareMatrices(const SparseMatrix* a, const SparseMatrix* b) {
+  EXPECT_EQ(a->num_rows(), b->num_rows());
+  EXPECT_EQ(a->num_cols(), b->num_cols());
+
+  int num_rows = a->num_rows();
+  int num_cols = a->num_cols();
+
+  for (int i = 0; i < num_cols; ++i) {
+    Vector x = Vector::Zero(num_cols);
+    x(i) = 1.0;
+
+    Vector y_a = Vector::Zero(num_rows);
+    Vector y_b = Vector::Zero(num_rows);
+
+    a->RightMultiply(x.data(), y_a.data());
+    b->RightMultiply(x.data(), y_b.data());
+
+    EXPECT_EQ((y_a - y_b).norm(), 0);
+  }
+}
+
+class CompressedRowSparseMatrixTest : public ::testing::Test {
+ protected:
+  virtual void SetUp() {
+    std::unique_ptr<LinearLeastSquaresProblem> problem(
+        CreateLinearLeastSquaresProblemFromId(1));
+
+    CHECK(problem != nullptr);
+
+    tsm.reset(down_cast<TripletSparseMatrix*>(problem->A.release()));
+    crsm.reset(CompressedRowSparseMatrix::FromTripletSparseMatrix(*tsm));
+
+    num_rows = tsm->num_rows();
+    num_cols = tsm->num_cols();
+
+    vector<int>* row_blocks = crsm->mutable_row_blocks();
+    row_blocks->resize(num_rows);
+    std::fill(row_blocks->begin(), row_blocks->end(), 1);
+
+    vector<int>* col_blocks = crsm->mutable_col_blocks();
+    col_blocks->resize(num_cols);
+    std::fill(col_blocks->begin(), col_blocks->end(), 1);
+  }
+
+  int num_rows;
+  int num_cols;
+
+  std::unique_ptr<TripletSparseMatrix> tsm;
+  std::unique_ptr<CompressedRowSparseMatrix> crsm;
+};
+
+TEST_F(CompressedRowSparseMatrixTest, Scale) {
+  Vector scale(num_cols);
+  for (int i = 0; i < num_cols; ++i) {
+    scale(i) = i + 1;
+  }
+
+  tsm->ScaleColumns(scale.data());
+  crsm->ScaleColumns(scale.data());
+  CompareMatrices(tsm.get(), crsm.get());
+}
+
+TEST_F(CompressedRowSparseMatrixTest, DeleteRows) {
+  // Clear the row and column blocks as these are purely scalar tests.
+  crsm->mutable_row_blocks()->clear();
+  crsm->mutable_col_blocks()->clear();
+
+  for (int i = 0; i < num_rows; ++i) {
+    tsm->Resize(num_rows - i, num_cols);
+    crsm->DeleteRows(crsm->num_rows() - tsm->num_rows());
+    CompareMatrices(tsm.get(), crsm.get());
+  }
+}
+
+TEST_F(CompressedRowSparseMatrixTest, AppendRows) {
+  // Clear the row and column blocks as these are purely scalar tests.
+  crsm->mutable_row_blocks()->clear();
+  crsm->mutable_col_blocks()->clear();
+
+  for (int i = 0; i < num_rows; ++i) {
+    TripletSparseMatrix tsm_appendage(*tsm);
+    tsm_appendage.Resize(i, num_cols);
+
+    tsm->AppendRows(tsm_appendage);
+    std::unique_ptr<CompressedRowSparseMatrix> crsm_appendage(
+        CompressedRowSparseMatrix::FromTripletSparseMatrix(tsm_appendage));
+
+    crsm->AppendRows(*crsm_appendage);
+    CompareMatrices(tsm.get(), crsm.get());
+  }
+}
+
+TEST_F(CompressedRowSparseMatrixTest, AppendAndDeleteBlockDiagonalMatrix) {
+  int num_diagonal_rows = crsm->num_cols();
+
+  std::unique_ptr<double[]> diagonal(new double[num_diagonal_rows]);
+  for (int i = 0; i < num_diagonal_rows; ++i) {
+    diagonal[i] = i;
+  }
+
+  vector<int> row_and_column_blocks;
+  row_and_column_blocks.push_back(1);
+  row_and_column_blocks.push_back(2);
+  row_and_column_blocks.push_back(2);
+
+  const vector<int> pre_row_blocks = crsm->row_blocks();
+  const vector<int> pre_col_blocks = crsm->col_blocks();
+
+  std::unique_ptr<CompressedRowSparseMatrix> appendage(
+      CompressedRowSparseMatrix::CreateBlockDiagonalMatrix(
+          diagonal.get(), row_and_column_blocks));
+
+  crsm->AppendRows(*appendage);
+
+  const vector<int> post_row_blocks = crsm->row_blocks();
+  const vector<int> post_col_blocks = crsm->col_blocks();
+
+  vector<int> expected_row_blocks = pre_row_blocks;
+  expected_row_blocks.insert(expected_row_blocks.end(),
+                             row_and_column_blocks.begin(),
+                             row_and_column_blocks.end());
+
+  vector<int> expected_col_blocks = pre_col_blocks;
+
+  EXPECT_EQ(expected_row_blocks, crsm->row_blocks());
+  EXPECT_EQ(expected_col_blocks, crsm->col_blocks());
+
+  crsm->DeleteRows(num_diagonal_rows);
+  EXPECT_EQ(crsm->row_blocks(), pre_row_blocks);
+  EXPECT_EQ(crsm->col_blocks(), pre_col_blocks);
+}
+
+TEST_F(CompressedRowSparseMatrixTest, ToDenseMatrix) {
+  Matrix tsm_dense;
+  Matrix crsm_dense;
+
+  tsm->ToDenseMatrix(&tsm_dense);
+  crsm->ToDenseMatrix(&crsm_dense);
+
+  EXPECT_EQ((tsm_dense - crsm_dense).norm(), 0.0);
+}
+
+TEST_F(CompressedRowSparseMatrixTest, ToCRSMatrix) {
+  CRSMatrix crs_matrix;
+  crsm->ToCRSMatrix(&crs_matrix);
+  EXPECT_EQ(crsm->num_rows(), crs_matrix.num_rows);
+  EXPECT_EQ(crsm->num_cols(), crs_matrix.num_cols);
+  EXPECT_EQ(crsm->num_rows() + 1, crs_matrix.rows.size());
+  EXPECT_EQ(crsm->num_nonzeros(), crs_matrix.cols.size());
+  EXPECT_EQ(crsm->num_nonzeros(), crs_matrix.values.size());
+
+  for (int i = 0; i < crsm->num_rows() + 1; ++i) {
+    EXPECT_EQ(crsm->rows()[i], crs_matrix.rows[i]);
+  }
+
+  for (int i = 0; i < crsm->num_nonzeros(); ++i) {
+    EXPECT_EQ(crsm->cols()[i], crs_matrix.cols[i]);
+    EXPECT_EQ(crsm->values()[i], crs_matrix.values[i]);
+  }
+}
+
+TEST(CompressedRowSparseMatrix, CreateBlockDiagonalMatrix) {
+  vector<int> blocks;
+  blocks.push_back(1);
+  blocks.push_back(2);
+  blocks.push_back(2);
+
+  Vector diagonal(5);
+  for (int i = 0; i < 5; ++i) {
+    diagonal(i) = i + 1;
+  }
+
+  std::unique_ptr<CompressedRowSparseMatrix> matrix(
+      CompressedRowSparseMatrix::CreateBlockDiagonalMatrix(diagonal.data(),
+                                                           blocks));
+
+  EXPECT_EQ(matrix->num_rows(), 5);
+  EXPECT_EQ(matrix->num_cols(), 5);
+  EXPECT_EQ(matrix->num_nonzeros(), 9);
+  EXPECT_EQ(blocks, matrix->row_blocks());
+  EXPECT_EQ(blocks, matrix->col_blocks());
+
+  Vector x(5);
+  Vector y(5);
+
+  x.setOnes();
+  y.setZero();
+  matrix->RightMultiply(x.data(), y.data());
+  for (int i = 0; i < diagonal.size(); ++i) {
+    EXPECT_EQ(y[i], diagonal[i]);
+  }
+
+  y.setZero();
+  matrix->LeftMultiply(x.data(), y.data());
+  for (int i = 0; i < diagonal.size(); ++i) {
+    EXPECT_EQ(y[i], diagonal[i]);
+  }
+
+  Matrix dense;
+  matrix->ToDenseMatrix(&dense);
+  EXPECT_EQ((dense.diagonal() - diagonal).norm(), 0.0);
+}
+
+TEST(CompressedRowSparseMatrix, Transpose) {
+  //  0  1  0  2  3  0
+  //  4  6  7  0  0  8
+  //  9 10  0 11 12  0
+  // 13  0 14 15  9  0
+  //  0 16 17  0  0  0
+
+  // Block structure:
+  //  A  A  A  A  B  B
+  //  A  A  A  A  B  B
+  //  A  A  A  A  B  B
+  //  C  C  C  C  D  D
+  //  C  C  C  C  D  D
+  //  C  C  C  C  D  D
+
+  CompressedRowSparseMatrix matrix(5, 6, 30);
+  int* rows = matrix.mutable_rows();
+  int* cols = matrix.mutable_cols();
+  double* values = matrix.mutable_values();
+  matrix.mutable_row_blocks()->push_back(3);
+  matrix.mutable_row_blocks()->push_back(3);
+  matrix.mutable_col_blocks()->push_back(4);
+  matrix.mutable_col_blocks()->push_back(2);
+
+  rows[0] = 0;
+  cols[0] = 1;
+  cols[1] = 3;
+  cols[2] = 4;
+
+  rows[1] = 3;
+  cols[3] = 0;
+  cols[4] = 1;
+  cols[5] = 2;
+  cols[6] = 5;
+
+  rows[2] = 7;
+  cols[7] = 0;
+  cols[8] = 1;
+  cols[9] = 3;
+  cols[10] = 4;
+
+  rows[3] = 11;
+  cols[11] = 0;
+  cols[12] = 2;
+  cols[13] = 3;
+  cols[14] = 4;
+
+  rows[4] = 15;
+  cols[15] = 1;
+  cols[16] = 2;
+  rows[5] = 17;
+
+  std::copy(values, values + 17, cols);
+
+  std::unique_ptr<CompressedRowSparseMatrix> transpose(matrix.Transpose());
+
+  ASSERT_EQ(transpose->row_blocks().size(), matrix.col_blocks().size());
+  for (int i = 0; i < transpose->row_blocks().size(); ++i) {
+    EXPECT_EQ(transpose->row_blocks()[i], matrix.col_blocks()[i]);
+  }
+
+  ASSERT_EQ(transpose->col_blocks().size(), matrix.row_blocks().size());
+  for (int i = 0; i < transpose->col_blocks().size(); ++i) {
+    EXPECT_EQ(transpose->col_blocks()[i], matrix.row_blocks()[i]);
+  }
+
+  Matrix dense_matrix;
+  matrix.ToDenseMatrix(&dense_matrix);
+
+  Matrix dense_transpose;
+  transpose->ToDenseMatrix(&dense_transpose);
+  EXPECT_NEAR((dense_matrix - dense_transpose.transpose()).norm(), 0.0, 1e-14);
+}
+
+TEST(CompressedRowSparseMatrix, FromTripletSparseMatrix) {
+  TripletSparseMatrix::RandomMatrixOptions options;
+  options.num_rows = 5;
+  options.num_cols = 7;
+  options.density = 0.5;
+
+  const int kNumTrials = 10;
+  for (int i = 0; i < kNumTrials; ++i) {
+    std::unique_ptr<TripletSparseMatrix> tsm(
+        TripletSparseMatrix::CreateRandomMatrix(options));
+    std::unique_ptr<CompressedRowSparseMatrix> crsm(
+        CompressedRowSparseMatrix::FromTripletSparseMatrix(*tsm));
+
+    Matrix expected;
+    tsm->ToDenseMatrix(&expected);
+    Matrix actual;
+    crsm->ToDenseMatrix(&actual);
+    EXPECT_NEAR((expected - actual).norm() / actual.norm(),
+                0.0,
+                std::numeric_limits<double>::epsilon())
+        << "\nexpected: \n"
+        << expected << "\nactual: \n"
+        << actual;
+  }
+}
+
+TEST(CompressedRowSparseMatrix, FromTripletSparseMatrixTransposed) {
+  TripletSparseMatrix::RandomMatrixOptions options;
+  options.num_rows = 5;
+  options.num_cols = 7;
+  options.density = 0.5;
+
+  const int kNumTrials = 10;
+  for (int i = 0; i < kNumTrials; ++i) {
+    std::unique_ptr<TripletSparseMatrix> tsm(
+        TripletSparseMatrix::CreateRandomMatrix(options));
+    std::unique_ptr<CompressedRowSparseMatrix> crsm(
+        CompressedRowSparseMatrix::FromTripletSparseMatrixTransposed(*tsm));
+
+    Matrix tmp;
+    tsm->ToDenseMatrix(&tmp);
+    Matrix expected = tmp.transpose();
+    Matrix actual;
+    crsm->ToDenseMatrix(&actual);
+    EXPECT_NEAR((expected - actual).norm() / actual.norm(),
+                0.0,
+                std::numeric_limits<double>::epsilon())
+        << "\nexpected: \n"
+        << expected << "\nactual: \n"
+        << actual;
+  }
+}
+
+typedef ::testing::tuple<CompressedRowSparseMatrix::StorageType> Param;
+
+std::string ParamInfoToString(testing::TestParamInfo<Param> info) {
+  if (::testing::get<0>(info.param) ==
+      CompressedRowSparseMatrix::UPPER_TRIANGULAR) {
+    return "UPPER";
+  }
+
+  if (::testing::get<0>(info.param) ==
+      CompressedRowSparseMatrix::LOWER_TRIANGULAR) {
+    return "LOWER";
+  }
+
+  return "UNSYMMETRIC";
+}
+
+class RightMultiplyTest : public ::testing::TestWithParam<Param> {};
+
+TEST_P(RightMultiplyTest, _) {
+  const int kMinNumBlocks = 1;
+  const int kMaxNumBlocks = 10;
+  const int kMinBlockSize = 1;
+  const int kMaxBlockSize = 5;
+  const int kNumTrials = 10;
+
+  for (int num_blocks = kMinNumBlocks; num_blocks < kMaxNumBlocks;
+       ++num_blocks) {
+    for (int trial = 0; trial < kNumTrials; ++trial) {
+      Param param = GetParam();
+      CompressedRowSparseMatrix::RandomMatrixOptions options;
+      options.num_col_blocks = num_blocks;
+      options.min_col_block_size = kMinBlockSize;
+      options.max_col_block_size = kMaxBlockSize;
+      options.num_row_blocks = 2 * num_blocks;
+      options.min_row_block_size = kMinBlockSize;
+      options.max_row_block_size = kMaxBlockSize;
+      options.block_density = std::max(0.5, RandDouble());
+      options.storage_type = ::testing::get<0>(param);
+      std::unique_ptr<CompressedRowSparseMatrix> matrix(
+          CompressedRowSparseMatrix::CreateRandomMatrix(options));
+      const int num_rows = matrix->num_rows();
+      const int num_cols = matrix->num_cols();
+
+      Vector x(num_cols);
+      x.setRandom();
+
+      Vector actual_y(num_rows);
+      actual_y.setZero();
+      matrix->RightMultiply(x.data(), actual_y.data());
+
+      Matrix dense;
+      matrix->ToDenseMatrix(&dense);
+      Vector expected_y;
+      if (::testing::get<0>(param) ==
+          CompressedRowSparseMatrix::UPPER_TRIANGULAR) {
+        expected_y = dense.selfadjointView<Eigen::Upper>() * x;
+      } else if (::testing::get<0>(param) ==
+                 CompressedRowSparseMatrix::LOWER_TRIANGULAR) {
+        expected_y = dense.selfadjointView<Eigen::Lower>() * x;
+      } else {
+        expected_y = dense * x;
+      }
+
+      ASSERT_NEAR((expected_y - actual_y).norm() / actual_y.norm(),
+                  0.0,
+                  std::numeric_limits<double>::epsilon() * 10)
+          << "\n"
+          << dense
+          << "x:\n"
+          << x.transpose() << "\n"
+          << "expected: \n" << expected_y.transpose() << "\n"
+          << "actual: \n" << actual_y.transpose();
+    }
+  }
+}
+
+INSTANTIATE_TEST_CASE_P(
+    CompressedRowSparseMatrix,
+    RightMultiplyTest,
+    ::testing::Values(CompressedRowSparseMatrix::LOWER_TRIANGULAR,
+                      CompressedRowSparseMatrix::UPPER_TRIANGULAR,
+                      CompressedRowSparseMatrix::UNSYMMETRIC),
+    ParamInfoToString);
+
+class LeftMultiplyTest : public ::testing::TestWithParam<Param> {};
+
+TEST_P(LeftMultiplyTest, _) {
+  const int kMinNumBlocks = 1;
+  const int kMaxNumBlocks = 10;
+  const int kMinBlockSize = 1;
+  const int kMaxBlockSize = 5;
+  const int kNumTrials = 10;
+
+  for (int num_blocks = kMinNumBlocks; num_blocks < kMaxNumBlocks;
+       ++num_blocks) {
+    for (int trial = 0; trial < kNumTrials; ++trial) {
+      Param param = GetParam();
+      CompressedRowSparseMatrix::RandomMatrixOptions options;
+      options.num_col_blocks = num_blocks;
+      options.min_col_block_size = kMinBlockSize;
+      options.max_col_block_size = kMaxBlockSize;
+      options.num_row_blocks = 2 * num_blocks;
+      options.min_row_block_size = kMinBlockSize;
+      options.max_row_block_size = kMaxBlockSize;
+      options.block_density = std::max(0.5, RandDouble());
+      options.storage_type = ::testing::get<0>(param);
+      std::unique_ptr<CompressedRowSparseMatrix> matrix(
+          CompressedRowSparseMatrix::CreateRandomMatrix(options));
+      const int num_rows = matrix->num_rows();
+      const int num_cols = matrix->num_cols();
+
+      Vector x(num_rows);
+      x.setRandom();
+
+      Vector actual_y(num_cols);
+      actual_y.setZero();
+      matrix->LeftMultiply(x.data(), actual_y.data());
+
+      Matrix dense;
+      matrix->ToDenseMatrix(&dense);
+      Vector expected_y;
+      if (::testing::get<0>(param) ==
+          CompressedRowSparseMatrix::UPPER_TRIANGULAR) {
+        expected_y = dense.selfadjointView<Eigen::Upper>() * x;
+      } else if (::testing::get<0>(param) ==
+                 CompressedRowSparseMatrix::LOWER_TRIANGULAR) {
+        expected_y = dense.selfadjointView<Eigen::Lower>() * x;
+      } else {
+        expected_y = dense.transpose() * x;
+      }
+
+      ASSERT_NEAR((expected_y - actual_y).norm() / actual_y.norm(),
+                  0.0,
+                  std::numeric_limits<double>::epsilon() * 10)
+          << "\n"
+          << dense
+          << "x\n"
+          << x.transpose() << "\n"
+          << "expected: \n" << expected_y.transpose() << "\n"
+          << "actual: \n" << actual_y.transpose();
+    }
+  }
+}
+
+INSTANTIATE_TEST_CASE_P(
+    CompressedRowSparseMatrix,
+    LeftMultiplyTest,
+    ::testing::Values(CompressedRowSparseMatrix::LOWER_TRIANGULAR,
+                      CompressedRowSparseMatrix::UPPER_TRIANGULAR,
+                      CompressedRowSparseMatrix::UNSYMMETRIC),
+    ParamInfoToString);
+
+class SquaredColumnNormTest : public ::testing::TestWithParam<Param> {};
+
+TEST_P(SquaredColumnNormTest, _) {
+  const int kMinNumBlocks = 1;
+  const int kMaxNumBlocks = 10;
+  const int kMinBlockSize = 1;
+  const int kMaxBlockSize = 5;
+  const int kNumTrials = 10;
+
+  for (int num_blocks = kMinNumBlocks; num_blocks < kMaxNumBlocks;
+       ++num_blocks) {
+    for (int trial = 0; trial < kNumTrials; ++trial) {
+      Param param = GetParam();
+      CompressedRowSparseMatrix::RandomMatrixOptions options;
+      options.num_col_blocks = num_blocks;
+      options.min_col_block_size = kMinBlockSize;
+      options.max_col_block_size = kMaxBlockSize;
+      options.num_row_blocks = 2 * num_blocks;
+      options.min_row_block_size = kMinBlockSize;
+      options.max_row_block_size = kMaxBlockSize;
+      options.block_density = std::max(0.5, RandDouble());
+      options.storage_type = ::testing::get<0>(param);
+      std::unique_ptr<CompressedRowSparseMatrix> matrix(
+          CompressedRowSparseMatrix::CreateRandomMatrix(options));
+      const int num_cols = matrix->num_cols();
+
+      Vector actual(num_cols);
+      actual.setZero();
+      matrix->SquaredColumnNorm(actual.data());
+
+      Matrix dense;
+      matrix->ToDenseMatrix(&dense);
+      Vector expected;
+      if (::testing::get<0>(param) ==
+          CompressedRowSparseMatrix::UPPER_TRIANGULAR) {
+        const Matrix full = dense.selfadjointView<Eigen::Upper>();
+        expected = full.colwise().squaredNorm();
+      } else if (::testing::get<0>(param) ==
+                 CompressedRowSparseMatrix::LOWER_TRIANGULAR) {
+        const Matrix full = dense.selfadjointView<Eigen::Lower>();
+        expected = full.colwise().squaredNorm();
+      } else {
+        expected = dense.colwise().squaredNorm();
+      }
+
+      ASSERT_NEAR((expected - actual).norm() / actual.norm(),
+                  0.0,
+                  std::numeric_limits<double>::epsilon() * 10)
+          << "\n"
+          << dense
+          << "expected: \n" << expected.transpose() << "\n"
+          << "actual: \n" << actual.transpose();
+    }
+  }
+}
+
+INSTANTIATE_TEST_CASE_P(
+    CompressedRowSparseMatrix,
+    SquaredColumnNormTest,
+    ::testing::Values(CompressedRowSparseMatrix::LOWER_TRIANGULAR,
+                      CompressedRowSparseMatrix::UPPER_TRIANGULAR,
+                      CompressedRowSparseMatrix::UNSYMMETRIC),
+    ParamInfoToString);
+
+// TODO(sameeragarwal): Add tests for the random matrix creation methods.
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/concurrent_queue.h b/internal/ceres/concurrent_queue.h
new file mode 100644
index 0000000..52e2903
--- /dev/null
+++ b/internal/ceres/concurrent_queue.h
@@ -0,0 +1,159 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vitus@google.com (Michael Vitus)
+
+#ifndef CERES_INTERNAL_CONCURRENT_QUEUE_H_
+#define CERES_INTERNAL_CONCURRENT_QUEUE_H_
+
+#include <condition_variable>
+#include <mutex>
+#include <queue>
+#include <thread>
+
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+// A thread-safe multi-producer, multi-consumer queue for queueing items that
+// are typically handled asynchronously by multiple threads. The ConcurrentQueue
+// has two states which only affect the Wait call:
+//
+//  (1) Waiters have been enabled (enabled by default or by calling
+//      EnableWaiters). The call to Wait will block until an item is available.
+//      Push and pop will operate as expected.
+//
+//  (2) StopWaiters has been called. All threads blocked in a Wait() call will
+//      be woken up and pop any available items from the queue. All future Wait
+//      requests will either return an element from the queue or return
+//      immediately if no element is present.  Push and pop will operate as
+//      expected.
+//
+// A common use case is using the concurrent queue as an interface for
+// scheduling tasks for a set of thread workers:
+//
+// ConcurrentQueue<Task> task_queue;
+//
+// [Worker threads]:
+//   Task task;
+//   while(task_queue.Wait(&task)) {
+//     ...
+//   }
+//
+// [Producers]:
+//   task_queue.Push(...);
+//   ..
+//   task_queue.Push(...);
+//   ...
+//   // Signal worker threads to stop blocking on Wait and terminate.
+//   task_queue.StopWaiters();
+//
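+// If the queue needs to be reused after a shutdown (for example between
+// solver invocations), one possible re-arm sequence is sketched below. This
+// is only an illustration of the EnableWaiters API (see also the
+// PushPopStopAndStart test), not a pattern required by Ceres:
+//
+//   task_queue.StopWaiters();    // Wake up all blocked workers.
+//   // ... join the worker threads ...
+//   task_queue.EnableWaiters();  // Blocking Wait calls are possible again.
+//   task_queue.Push(...);        // New work for a fresh set of workers.
+//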
+template <typename T>
+class ConcurrentQueue {
+ public:
+  // Defaults the queue to blocking on Wait calls.
+  ConcurrentQueue() : wait_(true) {}
+
+  // Atomically push an element onto the queue.  If a thread was waiting for an
+  // element, wake it up.
+  void Push(const T& value) {
+    std::lock_guard<std::mutex> lock(mutex_);
+    queue_.push(value);
+    work_pending_condition_.notify_one();
+  }
+
+  // Atomically pop an element from the queue.  If an element is present, return
+  // true. If the queue was empty, return false.
+  bool Pop(T* value) {
+    CHECK(value != nullptr);
+
+    std::lock_guard<std::mutex> lock(mutex_);
+    return PopUnlocked(value);
+  }
+
+  // Atomically pop an element from the queue. Blocks until one is available or
+  // StopWaiters is called.  Returns true if an element was successfully popped
+  // from the queue, otherwise returns false.
+  bool Wait(T* value) {
+    CHECK(value != nullptr);
+
+    std::unique_lock<std::mutex> lock(mutex_);
+    work_pending_condition_.wait(lock,
+                                 [&]() { return !(wait_ && queue_.empty()); });
+
+    return PopUnlocked(value);
+  }
+
+  // Unblock all threads waiting to pop a value from the queue, and they will
+  // exit Wait() without getting a value. All future Wait requests will return
+  // immediately if no element is present until EnableWaiters is called.
+  void StopWaiters() {
+    std::lock_guard<std::mutex> lock(mutex_);
+    wait_ = false;
+    work_pending_condition_.notify_all();
+  }
+
+  // Enable threads to block on Wait calls.
+  void EnableWaiters() {
+    std::lock_guard<std::mutex> lock(mutex_);
+    wait_ = true;
+  }
+
+ private:
+  // Pops an element from the queue.  If an element is present, return
+  // true. If the queue was empty, return false.  Not thread-safe. Must acquire
+  // the lock before calling.
+  bool PopUnlocked(T* value) {
+    if (queue_.empty()) {
+      return false;
+    }
+
+    *value = queue_.front();
+    queue_.pop();
+
+    return true;
+  }
+
+  // The mutex controls read and write access to the queue_ and wait_
+  // variables. It is also used to block the calling thread until an element is
+  // available to pop from the queue.
+  std::mutex mutex_;
+  std::condition_variable work_pending_condition_;
+
+  std::queue<T> queue_;
+  // If true, signals that callers of Wait will block waiting to pop an
+  // element off the queue.
+  bool wait_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_CONCURRENT_QUEUE_H_
diff --git a/internal/ceres/concurrent_queue_test.cc b/internal/ceres/concurrent_queue_test.cc
new file mode 100644
index 0000000..698966a
--- /dev/null
+++ b/internal/ceres/concurrent_queue_test.cc
@@ -0,0 +1,307 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vitus@google.com (Michael Vitus)
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifdef CERES_USE_CXX11_THREADS
+
+#include <chrono>
+#include <thread>
+
+#include "ceres/concurrent_queue.h"
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+// A basic test of push and pop.
+TEST(ConcurrentQueue, PushPop) {
+  ConcurrentQueue<int> queue;
+
+  const int num_to_add = 10;
+  for (int i = 0; i < num_to_add; ++i) {
+    queue.Push(i);
+  }
+
+  for (int i = 0; i < num_to_add; ++i) {
+    int value;
+    ASSERT_TRUE(queue.Pop(&value));
+    EXPECT_EQ(i, value);
+  }
+}
+
+// Push and pop elements from the queue after StopWaiters has been called.
+TEST(ConcurrentQueue, PushPopAfterStopWaiters) {
+  ConcurrentQueue<int> queue;
+
+  const int num_to_add = 10;
+  int value;
+
+  // Pop should return immediately with false with an empty queue.
+  ASSERT_FALSE(queue.Pop(&value));
+
+  for (int i = 0; i < num_to_add; ++i) {
+    queue.Push(i);
+  }
+
+  // Call stop waiters to ensure we can still Push and Pop from the queue.
+  queue.StopWaiters();
+
+  for (int i = 0; i < num_to_add; ++i) {
+    ASSERT_TRUE(queue.Pop(&value));
+    EXPECT_EQ(i, value);
+  }
+
+  // Pop should return immediately with false with an empty queue.
+  ASSERT_FALSE(queue.Pop(&value));
+
+  // Ensure we can still push onto the queue after StopWaiters has been called.
+  const int offset = 123;
+  for (int i = 0; i < num_to_add; ++i) {
+    queue.Push(i + offset);
+  }
+
+  for (int i = 0; i < num_to_add; ++i) {
+    int value;
+    ASSERT_TRUE(queue.Pop(&value));
+    EXPECT_EQ(i + offset, value);
+  }
+
+  // Pop should return immediately with false with an empty queue.
+  ASSERT_FALSE(queue.Pop(&value));
+
+  // Try calling StopWaiters again to ensure nothing changes.
+  queue.StopWaiters();
+
+  queue.Push(13456);
+  ASSERT_TRUE(queue.Pop(&value));
+  EXPECT_EQ(13456, value);
+}
+
+// Push and pop elements after StopWaiters and EnableWaiters has been called.
+TEST(ConcurrentQueue, PushPopStopAndStart) {
+  ConcurrentQueue<int> queue;
+
+  int value;
+
+  queue.Push(13456);
+  queue.Push(256);
+
+  queue.StopWaiters();
+
+  ASSERT_TRUE(queue.Pop(&value));
+  EXPECT_EQ(13456, value);
+
+  queue.EnableWaiters();
+
+  // Try adding another entry after enable has been called.
+  queue.Push(989);
+
+  // Ensure we can pop both elements off.
+  ASSERT_TRUE(queue.Pop(&value));
+  EXPECT_EQ(256, value);
+
+  ASSERT_TRUE(queue.Pop(&value));
+  EXPECT_EQ(989, value);
+
+  // Re-enable waiting.
+  queue.EnableWaiters();
+
+  // Pop should return immediately with false with an empty queue.
+  ASSERT_FALSE(queue.Pop(&value));
+}
+
+// A basic test for Wait.
+TEST(ConcurrentQueue, Wait) {
+  ConcurrentQueue<int> queue;
+
+  int value;
+
+  queue.Push(13456);
+
+  ASSERT_TRUE(queue.Wait(&value));
+  EXPECT_EQ(13456, value);
+
+  queue.StopWaiters();
+
+  // Ensure waiting returns immediately after StopWaiters.
+  EXPECT_FALSE(queue.Wait(&value));
+  EXPECT_FALSE(queue.Wait(&value));
+
+  EXPECT_FALSE(queue.Pop(&value));
+
+  // Calling StopWaiters multiple times does not change anything.
+  queue.StopWaiters();
+
+  EXPECT_FALSE(queue.Wait(&value));
+  EXPECT_FALSE(queue.Wait(&value));
+
+  queue.Push(989);
+  queue.Push(789);
+
+  ASSERT_TRUE(queue.Wait(&value));
+  EXPECT_EQ(989, value);
+
+  ASSERT_TRUE(queue.Wait(&value));
+  EXPECT_EQ(789, value);
+}
+
+// Ensure wait blocks until an element is pushed. Also ensure wait does not
+// block after StopWaiters is called and there is no value in the queue.
+// Finally, ensures EnableWaiters re-enables waiting.
+TEST(ConcurrentQueue, EnsureWaitBlocks) {
+  ConcurrentQueue<int> queue;
+
+  int value = 0;
+  bool valid_value = false;
+  bool waiting = false;
+  std::mutex mutex;
+
+  std::thread thread([&]() {
+    {
+      std::lock_guard<std::mutex> lock(mutex);
+      waiting = true;
+    }
+
+    int element = 87987;
+    bool valid = queue.Wait(&element);
+
+    {
+      std::lock_guard<std::mutex> lock(mutex);
+      waiting = false;
+      value = element;
+      valid_value = valid;
+    }
+  });
+
+  // Give the thread time to start and wait.
+  std::this_thread::sleep_for(std::chrono::milliseconds(500));
+
+  // Ensure nothing has been popped off the queue.
+  {
+    std::lock_guard<std::mutex> lock(mutex);
+    EXPECT_TRUE(waiting);
+    ASSERT_FALSE(valid_value);
+    ASSERT_EQ(0, value);
+  }
+
+  queue.Push(13456);
+
+  // Wait for the thread to pop the value.
+  thread.join();
+
+  EXPECT_TRUE(valid_value);
+  EXPECT_EQ(13456, value);
+}
+
+TEST(ConcurrentQueue, StopAndEnableWaiters) {
+  ConcurrentQueue<int> queue;
+
+  int value = 0;
+  bool valid_value = false;
+  bool waiting = false;
+  std::mutex mutex;
+
+  auto task = [&]() {
+    {
+      std::lock_guard<std::mutex> lock(mutex);
+      waiting = true;
+    }
+
+    int element = 87987;
+    bool valid = queue.Wait(&element);
+
+    {
+      std::lock_guard<std::mutex> lock(mutex);
+      waiting = false;
+      value = element;
+      valid_value = valid;
+    }
+  };
+
+  std::thread thread_1(task);
+
+  // Give the thread time to start and wait.
+  std::this_thread::sleep_for(std::chrono::milliseconds(500));
+
+  // Ensure the thread is waiting.
+  {
+    std::lock_guard<std::mutex> lock(mutex);
+    EXPECT_TRUE(waiting);
+  }
+
+  // Unblock the thread.
+  queue.StopWaiters();
+
+  thread_1.join();
+
+  // Ensure nothing has been popped off the queue.
+  EXPECT_FALSE(valid_value);
+  EXPECT_EQ(87987, value);
+
+  // Ensure another call to Wait returns immediately.
+  EXPECT_FALSE(queue.Wait(&value));
+
+  queue.EnableWaiters();
+
+  value = 0;
+  valid_value = false;
+  waiting = false;
+
+  // Start another task waiting for an element to be pushed.
+  std::thread thread_2(task);
+
+  // Give the thread time to start and wait.
+  std::this_thread::sleep_for(std::chrono::milliseconds(500));
+
+  // Ensure nothing is popped off the queue.
+  {
+    std::lock_guard<std::mutex> lock(mutex);
+    EXPECT_TRUE(waiting);
+    ASSERT_FALSE(valid_value);
+    ASSERT_EQ(0, value);
+  }
+
+  queue.Push(13456);
+
+  // Wait for the thread to pop the value.
+  thread_2.join();
+
+  EXPECT_TRUE(valid_value);
+  EXPECT_EQ(13456, value);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_USE_CXX11_THREADS
diff --git a/internal/ceres/conditioned_cost_function.cc b/internal/ceres/conditioned_cost_function.cc
new file mode 100644
index 0000000..d933ad7
--- /dev/null
+++ b/internal/ceres/conditioned_cost_function.cc
@@ -0,0 +1,130 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wjr@google.com (William Rucklidge)
+//
+// This file contains the implementation of the conditioned cost function.
+
+#include "ceres/conditioned_cost_function.h"
+
+#include <cstddef>
+
+#include "ceres/internal/eigen.h"
+#include "ceres/stl_util.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+
+namespace ceres {
+
+// This cost function has the same dimensions (parameters, residuals) as
+// the one it's wrapping.
+ConditionedCostFunction::ConditionedCostFunction(
+    CostFunction* wrapped_cost_function,
+    const std::vector<CostFunction*>& conditioners,
+    Ownership ownership)
+    : wrapped_cost_function_(wrapped_cost_function),
+      conditioners_(conditioners),
+      ownership_(ownership) {
+  // Set up our dimensions.
+  set_num_residuals(wrapped_cost_function_->num_residuals());
+  *mutable_parameter_block_sizes() =
+      wrapped_cost_function_->parameter_block_sizes();
+
+  // Sanity-check the conditioners' dimensions.
+  CHECK_EQ(wrapped_cost_function_->num_residuals(), conditioners_.size());
+  for (int i = 0; i < wrapped_cost_function_->num_residuals(); i++) {
+    if (conditioners[i]) {
+      CHECK_EQ(1, conditioners[i]->num_residuals());
+      CHECK_EQ(1, conditioners[i]->parameter_block_sizes().size());
+      CHECK_EQ(1, conditioners[i]->parameter_block_sizes()[0]);
+    }
+  }
+}
+
+ConditionedCostFunction::~ConditionedCostFunction() {
+  if (ownership_ == TAKE_OWNERSHIP) {
+    STLDeleteUniqueContainerPointers(conditioners_.begin(),
+                                     conditioners_.end());
+  } else {
+    wrapped_cost_function_.release();
+  }
+}
+
+bool ConditionedCostFunction::Evaluate(double const* const* parameters,
+                                       double* residuals,
+                                       double** jacobians) const {
+  bool success = wrapped_cost_function_->Evaluate(parameters, residuals,
+                                                  jacobians);
+  if (!success) {
+    return false;
+  }
+
+  for (int r = 0; r < wrapped_cost_function_->num_residuals(); r++) {
+    // On output, we want to have
+    // residuals[r] = conditioners[r](wrapped_residuals[r])
+    // For parameter block i, column c,
+    // jacobians[i][r*parameter_block_size_[i] + c] =
+    //   = d residual[r] / d parameters[i][c]
+    //   = conditioners[r]'(wrapped_residuals[r]) *
+    //       d wrapped_residuals[r] / d parameters[i][c]
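+    //
+    // As a concrete illustration (an affine conditioner chosen only for this
+    // comment; it mirrors LinearCostFunction in the unit test): if
+    // conditioners[r](s) = a * s + b, then on output
+    //   residuals[r] = a * wrapped_residuals[r] + b
+    // and the r-th Jacobian row for every parameter block is scaled by a,
+    // since conditioners[r]'(s) = a for all s.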
+    if (conditioners_[r]) {
+      double conditioner_derivative;
+      double* conditioner_derivative_pointer = &conditioner_derivative;
+      double** conditioner_derivative_pointer2 =
+          &conditioner_derivative_pointer;
+      if (!jacobians) {
+        conditioner_derivative_pointer2 = NULL;
+      }
+
+      double unconditioned_residual = residuals[r];
+      double* parameter_pointer = &unconditioned_residual;
+      success = conditioners_[r]->Evaluate(&parameter_pointer,
+                                           &residuals[r],
+                                           conditioner_derivative_pointer2);
+      if (!success) {
+        return false;
+      }
+
+      if (jacobians) {
+        for (int i = 0;
+             i < wrapped_cost_function_->parameter_block_sizes().size();
+             i++) {
+          if (jacobians[i]) {
+            int parameter_block_size =
+                wrapped_cost_function_->parameter_block_sizes()[i];
+            VectorRef jacobian_row(jacobians[i] + r * parameter_block_size,
+                                   parameter_block_size, 1);
+            jacobian_row *= conditioner_derivative;
+          }
+        }
+      }
+    }
+  }
+  return true;
+}
+
+}  // namespace ceres
diff --git a/internal/ceres/conditioned_cost_function_test.cc b/internal/ceres/conditioned_cost_function_test.cc
new file mode 100644
index 0000000..6297451
--- /dev/null
+++ b/internal/ceres/conditioned_cost_function_test.cc
@@ -0,0 +1,141 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wjr@google.com (William Rucklidge)
+//
+// Tests for the conditioned cost function.
+
+#include "ceres/conditioned_cost_function.h"
+
+#include "ceres/internal/eigen.h"
+#include "ceres/normal_prior.h"
+#include "ceres/types.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+// The size of the cost functions we build.
+static const int kTestCostFunctionSize = 3;
+
+// A simple cost function: return ax + b.
+class LinearCostFunction : public CostFunction {
+ public:
+  LinearCostFunction(double a, double b) : a_(a), b_(b) {
+    set_num_residuals(1);
+    mutable_parameter_block_sizes()->push_back(1);
+  }
+
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** jacobians) const {
+    *residuals = **parameters * a_ + b_;
+    if (jacobians && *jacobians) {
+      **jacobians = a_;
+    }
+
+    return true;
+  }
+
+ private:
+  const double a_, b_;
+};
+
+// Tests that ConditionedCostFunction does what it's supposed to.
+TEST(ConditionedCostFunction, NormalOperation) {
+  double v1[kTestCostFunctionSize], v2[kTestCostFunctionSize],
+      jac[kTestCostFunctionSize * kTestCostFunctionSize],
+      result[kTestCostFunctionSize];
+
+  for (int i = 0; i < kTestCostFunctionSize; i++) {
+    v1[i] = i;
+    v2[i] = i * 10;
+    // Seed a few garbage values in the Jacobian matrix, to make sure that
+    // they're overwritten.
+    jac[i * 2] = i * i;
+    result[i] = i * i * i;
+  }
+
+  // Make a cost function that computes x - v2
+  VectorRef v2_vector(v2, kTestCostFunctionSize, 1);
+  Matrix identity(kTestCostFunctionSize, kTestCostFunctionSize);
+  identity.setIdentity();
+  NormalPrior* difference_cost_function = new NormalPrior(identity, v2_vector);
+
+  std::vector<CostFunction*> conditioners;
+  for (int i = 0; i < kTestCostFunctionSize; i++) {
+    conditioners.push_back(new LinearCostFunction(i + 2, i * 7));
+  }
+
+  ConditionedCostFunction conditioned_cost_function(
+      difference_cost_function, conditioners, TAKE_OWNERSHIP);
+  EXPECT_EQ(difference_cost_function->num_residuals(),
+            conditioned_cost_function.num_residuals());
+  EXPECT_EQ(difference_cost_function->parameter_block_sizes(),
+            conditioned_cost_function.parameter_block_sizes());
+
+  double* parameters[1];
+  parameters[0] = v1;
+  double* jacs[1];
+  jacs[0] = jac;
+
+  conditioned_cost_function.Evaluate(parameters, result, jacs);
+  for (int i = 0; i < kTestCostFunctionSize; i++) {
+    EXPECT_DOUBLE_EQ((i + 2) * (v1[i] - v2[i]) + i * 7, result[i]);
+  }
+
+  for (int i = 0; i < kTestCostFunctionSize; i++) {
+    for (int j = 0; j < kTestCostFunctionSize; j++) {
+      double actual = jac[i * kTestCostFunctionSize + j];
+      if (i != j) {
+        EXPECT_DOUBLE_EQ(0, actual);
+      } else {
+        EXPECT_DOUBLE_EQ(i + 2, actual);
+      }
+    }
+  }
+}
+
+TEST(ConditionedCostFunction, SharedConditionersDoNotTriggerDoubleFree) {
+  // Make a cost function that computes x - v2
+  double v2[kTestCostFunctionSize];
+  VectorRef v2_vector(v2, kTestCostFunctionSize, 1);
+  Matrix identity =
+      Matrix::Identity(kTestCostFunctionSize, kTestCostFunctionSize);
+  NormalPrior* difference_cost_function = new NormalPrior(identity, v2_vector);
+  CostFunction* conditioner = new LinearCostFunction(2, 7);
+  std::vector<CostFunction*> conditioners;
+  for (int i = 0; i < kTestCostFunctionSize; i++) {
+    conditioners.push_back(conditioner);
+  }
+
+  ConditionedCostFunction conditioned_cost_function(
+      difference_cost_function, conditioners, TAKE_OWNERSHIP);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/conjugate_gradients_solver.cc b/internal/ceres/conjugate_gradients_solver.cc
new file mode 100644
index 0000000..c6f85c1
--- /dev/null
+++ b/internal/ceres/conjugate_gradients_solver.cc
@@ -0,0 +1,247 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// A preconditioned conjugate gradients solver
+// (ConjugateGradientsSolver) for positive semidefinite linear
+// systems.
+//
+// We have also augmented the termination criterion used by this
+// solver to support not just residual based termination but also
+// termination based on decrease in the value of the quadratic model
+// that CG optimizes.
+
+#include "ceres/conjugate_gradients_solver.h"
+
+#include <cmath>
+#include <cstddef>
+#include "ceres/internal/eigen.h"
+#include "ceres/linear_operator.h"
+#include "ceres/stringprintf.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+namespace {
+
+bool IsZeroOrInfinity(double x) {
+  return ((x == 0.0) || std::isinf(x));
+}
+
+}  // namespace
+
+ConjugateGradientsSolver::ConjugateGradientsSolver(
+    const LinearSolver::Options& options)
+    : options_(options) {
+}
+
+LinearSolver::Summary ConjugateGradientsSolver::Solve(
+    LinearOperator* A,
+    const double* b,
+    const LinearSolver::PerSolveOptions& per_solve_options,
+    double* x) {
+  CHECK(A != nullptr);
+  CHECK(x != nullptr);
+  CHECK(b != nullptr);
+  CHECK_EQ(A->num_rows(), A->num_cols());
+
+  LinearSolver::Summary summary;
+  summary.termination_type = LINEAR_SOLVER_NO_CONVERGENCE;
+  summary.message = "Maximum number of iterations reached.";
+  summary.num_iterations = 0;
+
+  const int num_cols = A->num_cols();
+  VectorRef xref(x, num_cols);
+  ConstVectorRef bref(b, num_cols);
+
+  const double norm_b = bref.norm();
+  if (norm_b == 0.0) {
+    xref.setZero();
+    summary.termination_type = LINEAR_SOLVER_SUCCESS;
+    summary.message = "Convergence. |b| = 0.";
+    return summary;
+  }
+
+  Vector r(num_cols);
+  Vector p(num_cols);
+  Vector z(num_cols);
+  Vector tmp(num_cols);
+
+  const double tol_r = per_solve_options.r_tolerance * norm_b;
+
+  tmp.setZero();
+  A->RightMultiply(x, tmp.data());
+  r = bref - tmp;
+  double norm_r = r.norm();
+  if (options_.min_num_iterations == 0 && norm_r <= tol_r) {
+    summary.termination_type = LINEAR_SOLVER_SUCCESS;
+    summary.message =
+        StringPrintf("Convergence. |r| = %e <= %e.", norm_r, tol_r);
+    return summary;
+  }
+
+  double rho = 1.0;
+
+  // Initial value of the quadratic model Q = x'Ax - 2 * b'x.
+  double Q0 = -1.0 * xref.dot(bref + r);
+
+  for (summary.num_iterations = 1;; ++summary.num_iterations) {
+    // Apply preconditioner
+    if (per_solve_options.preconditioner != NULL) {
+      z.setZero();
+      per_solve_options.preconditioner->RightMultiply(r.data(), z.data());
+    } else {
+      z = r;
+    }
+
+    double last_rho = rho;
+    rho = r.dot(z);
+    if (IsZeroOrInfinity(rho)) {
+      summary.termination_type = LINEAR_SOLVER_FAILURE;
+      summary.message = StringPrintf("Numerical failure. rho = r'z = %e.", rho);
+      break;
+    }
+
+    if (summary.num_iterations == 1) {
+      p = z;
+    } else {
+      double beta = rho / last_rho;
+      if (IsZeroOrInfinity(beta)) {
+        summary.termination_type = LINEAR_SOLVER_FAILURE;
+        summary.message = StringPrintf(
+            "Numerical failure. beta = rho_n / rho_{n-1} = %e, "
+            "rho_n = %e, rho_{n-1} = %e", beta, rho, last_rho);
+        break;
+      }
+      p = z + beta * p;
+    }
+
+    Vector& q = z;
+    q.setZero();
+    A->RightMultiply(p.data(), q.data());
+    const double pq = p.dot(q);
+    if ((pq <= 0) || std::isinf(pq)) {
+      summary.termination_type = LINEAR_SOLVER_NO_CONVERGENCE;
+      summary.message = StringPrintf(
+          "Matrix is indefinite, no more progress can be made. "
+          "p'q = %e. |p| = %e, |q| = %e",
+          pq, p.norm(), q.norm());
+      break;
+    }
+
+    const double alpha = rho / pq;
+    if (std::isinf(alpha)) {
+      summary.termination_type = LINEAR_SOLVER_FAILURE;
+      summary.message =
+          StringPrintf("Numerical failure. alpha = rho / pq = %e, "
+                       "rho = %e, pq = %e.", alpha, rho, pq);
+      break;
+    }
+
+    xref = xref + alpha * p;
+
+    // Ideally we would just use the update r = r - alpha*q to keep
+    // track of the residual vector. However this estimate tends to
+    // drift over time due to round off errors. Thus every
+    // residual_reset_period iterations, we calculate the residual as
+    // r = b - Ax. We do not do this every iteration because this
+    // requires an additional matrix vector multiply which would
+    // double the complexity of the CG algorithm.
+    if (summary.num_iterations % options_.residual_reset_period == 0) {
+      tmp.setZero();
+      A->RightMultiply(x, tmp.data());
+      r = bref - tmp;
+    } else {
+      r = r - alpha * q;
+    }
+
+    // Quadratic model based termination.
+    //   Q1 = x'Ax - 2 * b'x.
+    const double Q1 = -1.0 * xref.dot(bref + r);
+
+    // For PSD matrices A, let
+    //
+    //   Q(x) = x'Ax - 2b'x
+    //
+    // be the cost of the quadratic function defined by A and b. Then,
+    // the solver terminates at iteration i if
+    //
+    //   i * (Q(x_i) - Q(x_i-1)) / Q(x_i) < q_tolerance.
+    //
+    // This termination criterion is more useful when using CG to
+    // solve the Newton step. This particular convergence test comes
+    // from Stephen Nash's work on truncated Newton
+    // methods. References:
+    //
+    //   1. Stephen G. Nash & Ariela Sofer, Assessing A Search
+    //   Direction Within A Truncated Newton Method, Operations
+    //   Research Letters 9 (1990), 219-221.
+    //
+    //   2. Stephen G. Nash, A Survey of Truncated Newton Methods,
+    //   Journal of Computational and Applied Mathematics,
+    //   124(1-2), 45-59, 2000.
+    //
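+    // A worked example with made-up numbers, purely to illustrate the test:
+    // at iteration i = 3 with Q0 = -10.0 and Q1 = -10.5,
+    //
+    //   zeta = 3 * (-10.5 - (-10.0)) / -10.5 ~= 0.143,
+    //
+    // so (assuming min_num_iterations has been reached) the solver would
+    // declare convergence for q_tolerance = 0.2 but keep iterating for
+    // q_tolerance = 0.1.
+    //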
+    const double zeta = summary.num_iterations * (Q1 - Q0) / Q1;
+    if (zeta < per_solve_options.q_tolerance &&
+        summary.num_iterations >= options_.min_num_iterations) {
+      summary.termination_type = LINEAR_SOLVER_SUCCESS;
+      summary.message =
+          StringPrintf("Iteration: %d Convergence: zeta = %e < %e. |r| = %e",
+                       summary.num_iterations,
+                       zeta,
+                       per_solve_options.q_tolerance,
+                       r.norm());
+      break;
+    }
+    Q0 = Q1;
+
+    // Residual based termination.
+    norm_r = r.norm();
+    if (norm_r <= tol_r &&
+        summary.num_iterations >= options_.min_num_iterations) {
+      summary.termination_type = LINEAR_SOLVER_SUCCESS;
+      summary.message =
+          StringPrintf("Iteration: %d Convergence. |r| = %e <= %e.",
+                       summary.num_iterations,
+                       norm_r,
+                       tol_r);
+      break;
+    }
+
+    if (summary.num_iterations >= options_.max_num_iterations) {
+      break;
+    }
+  }
+
+  return summary;
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/conjugate_gradients_solver.h b/internal/ceres/conjugate_gradients_solver.h
new file mode 100644
index 0000000..434cde0
--- /dev/null
+++ b/internal/ceres/conjugate_gradients_solver.h
@@ -0,0 +1,72 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Preconditioned Conjugate Gradients based solver for positive
+// semidefinite linear systems.
+
+#ifndef CERES_INTERNAL_CONJUGATE_GRADIENTS_SOLVER_H_
+#define CERES_INTERNAL_CONJUGATE_GRADIENTS_SOLVER_H_
+
+#include "ceres/linear_solver.h"
+
+namespace ceres {
+namespace internal {
+
+class LinearOperator;
+
+// This class implements the now classical Conjugate Gradients
+// algorithm of Hestenes & Stiefel for solving positive semidefinite
+// linear systems. Optionally it can also use a preconditioner to
+// reduce the condition number of the linear system and improve the
+// convergence rate. Modern references for Conjugate Gradients are the
+// books by Yousef Saad and Trefethen & Bau. This implementation of CG
+// has been augmented with additional termination tests that are
+// needed for forcing early termination when used as part of an
+// inexact Newton solver.
+//
+// For more details see the documentation for
+// LinearSolver::PerSolveOptions::r_tolerance and
+// LinearSolver::PerSolveOptions::q_tolerance in linear_solver.h.
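+//
+// A minimal usage sketch, mirroring conjugate_gradients_solver_test.cc; the
+// concrete LinearOperator A, the right hand side b and initial iterate x
+// (Eigen vectors of size A->num_cols()), and the tolerances are illustrative:
+//
+//   LinearSolver::Options options;
+//   options.max_num_iterations = 10;
+//   ConjugateGradientsSolver solver(options);
+//
+//   LinearSolver::PerSolveOptions per_solve_options;
+//   per_solve_options.r_tolerance = 1e-9;
+//   LinearSolver::Summary summary =
+//       solver.Solve(A, b.data(), per_solve_options, x.data());
+//   // summary.termination_type is LINEAR_SOLVER_SUCCESS on convergence.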
+class ConjugateGradientsSolver : public LinearSolver {
+ public:
+  explicit ConjugateGradientsSolver(const LinearSolver::Options& options);
+  virtual Summary Solve(LinearOperator* A,
+                        const double* b,
+                        const LinearSolver::PerSolveOptions& per_solve_options,
+                        double* x);
+
+ private:
+  const LinearSolver::Options options_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_CONJUGATE_GRADIENTS_SOLVER_H_
diff --git a/internal/ceres/conjugate_gradients_solver_test.cc b/internal/ceres/conjugate_gradients_solver_test.cc
new file mode 100644
index 0000000..9311998
--- /dev/null
+++ b/internal/ceres/conjugate_gradients_solver_test.cc
@@ -0,0 +1,135 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: fredp@google.com (Fred Pighin)
+//
+// TODO(sameeragarwal): More comprehensive testing with larger and
+// more badly conditioned problem.
+
+#include <memory>
+#include "gtest/gtest.h"
+#include "ceres/conjugate_gradients_solver.h"
+#include "ceres/linear_solver.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+TEST(ConjugateGradientTest, Solves3x3IdentitySystem) {
+  double diagonal[] = { 1.0, 1.0, 1.0 };
+  std::unique_ptr<TripletSparseMatrix>
+      A(TripletSparseMatrix::CreateSparseDiagonalMatrix(diagonal, 3));
+  Vector b(3);
+  Vector x(3);
+
+  b(0) = 1.0;
+  b(1) = 2.0;
+  b(2) = 3.0;
+
+  x(0) = 1;
+  x(1) = 1;
+  x(2) = 1;
+
+  LinearSolver::Options options;
+  options.max_num_iterations = 10;
+
+  LinearSolver::PerSolveOptions per_solve_options;
+  per_solve_options.r_tolerance = 1e-9;
+
+  ConjugateGradientsSolver solver(options);
+  LinearSolver::Summary summary =
+      solver.Solve(A.get(), b.data(), per_solve_options, x.data());
+
+  EXPECT_EQ(summary.termination_type, LINEAR_SOLVER_SUCCESS);
+  ASSERT_EQ(summary.num_iterations, 1);
+
+  ASSERT_DOUBLE_EQ(1, x(0));
+  ASSERT_DOUBLE_EQ(2, x(1));
+  ASSERT_DOUBLE_EQ(3, x(2));
+}
+
+
+TEST(ConjugateGradientTest, Solves3x3SymmetricSystem) {
+  std::unique_ptr<TripletSparseMatrix> A(new TripletSparseMatrix(3, 3, 9));
+  Vector b(3);
+  Vector x(3);
+
+  //      | 2  -1  0|
+  //  A = |-1   2 -1| is symmetric positive definite.
+  //      | 0  -1  2|
+  int* Ai = A->mutable_rows();
+  int* Aj = A->mutable_cols();
+  double* Ax = A->mutable_values();
+  int counter = 0;
+  for (int i = 0; i < 3; ++i) {
+    for (int j = 0; j < 3; ++j) {
+      Ai[counter] = i;
+      Aj[counter] = j;
+      ++counter;
+    }
+  }
+  Ax[0] = 2.0;
+  Ax[1] = -1.0;
+  Ax[2] = 0.0;
+  Ax[3] = -1.0;
+  Ax[4] = 2.0;
+  Ax[5] = -1.0;
+  Ax[6] = 0.0;
+  Ax[7] = -1.0;
+  Ax[8] = 2.0;
+  A->set_num_nonzeros(9);
+
+  b(0) = -1;
+  b(1) = 0;
+  b(2) = 3;
+
+  x(0) = 1;
+  x(1) = 1;
+  x(2) = 1;
+
+  LinearSolver::Options options;
+  options.max_num_iterations = 10;
+
+  LinearSolver::PerSolveOptions per_solve_options;
+  per_solve_options.r_tolerance = 1e-9;
+
+  ConjugateGradientsSolver solver(options);
+  LinearSolver::Summary summary =
+      solver.Solve(A.get(), b.data(), per_solve_options, x.data());
+
+  EXPECT_EQ(summary.termination_type, LINEAR_SOLVER_SUCCESS);
+
+  ASSERT_DOUBLE_EQ(0, x(0));
+  ASSERT_DOUBLE_EQ(1, x(1));
+  ASSERT_DOUBLE_EQ(2, x(2));
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/context.cc b/internal/ceres/context.cc
new file mode 100644
index 0000000..e223201
--- /dev/null
+++ b/internal/ceres/context.cc
@@ -0,0 +1,41 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vitus@google.com (Michael Vitus)
+
+#include "ceres/context.h"
+
+#include "ceres/context_impl.h"
+
+namespace ceres {
+
+Context* Context::Create() {
+  return new internal::ContextImpl();
+}
+
+}  // namespace ceres
diff --git a/internal/ceres/context_impl.cc b/internal/ceres/context_impl.cc
new file mode 100644
index 0000000..1b9662f
--- /dev/null
+++ b/internal/ceres/context_impl.cc
@@ -0,0 +1,43 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vitus@google.com (Michael Vitus)
+
+#include "ceres/context_impl.h"
+
+namespace ceres {
+namespace internal {
+
+void ContextImpl::EnsureMinimumThreads(int num_threads) {
+#ifdef CERES_USE_CXX11_THREADS
+  thread_pool.Resize(num_threads);
+#endif  // CERES_USE_CXX11_THREADS
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/context_impl.h b/internal/ceres/context_impl.h
new file mode 100644
index 0000000..d83b77a
--- /dev/null
+++ b/internal/ceres/context_impl.h
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vitus@google.com (Michael Vitus)
+
+#ifndef CERES_INTERNAL_CONTEXT_IMPL_H_
+#define CERES_INTERNAL_CONTEXT_IMPL_H_
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#include "ceres/context.h"
+
+#ifdef CERES_USE_CXX11_THREADS
+#include "ceres/thread_pool.h"
+#endif  // CERES_USE_CXX11_THREADS
+
+namespace ceres {
+namespace internal {
+
+class ContextImpl : public Context {
+ public:
+  ContextImpl() {}
+  ContextImpl(const ContextImpl&) = delete;
+  void operator=(const ContextImpl&) = delete;
+
+  virtual ~ContextImpl() {}
+
+  // When compiled with C++11 threading support, resizes the thread pool to
+  // min(num_threads, num_hardware_threads) threads, where
+  // num_hardware_threads is defined by the hardware. Otherwise this call is a
+  // no-op.
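+  //
+  // For example (illustrative only; the real call sites live elsewhere in
+  // Ceres), a caller that wants the pool to be able to run four tasks
+  // concurrently would call:
+  //
+  //   context->EnsureMinimumThreads(4);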
+  void EnsureMinimumThreads(int num_threads);
+
+#ifdef CERES_USE_CXX11_THREADS
+  ThreadPool thread_pool;
+#endif  // CERES_USE_CXX11_THREADS
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_CONTEXT_IMPL_H_
diff --git a/internal/ceres/coordinate_descent_minimizer.cc b/internal/ceres/coordinate_descent_minimizer.cc
new file mode 100644
index 0000000..c5d56f3
--- /dev/null
+++ b/internal/ceres/coordinate_descent_minimizer.cc
@@ -0,0 +1,277 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/coordinate_descent_minimizer.h"
+
+#include <algorithm>
+#include <iterator>
+#include <memory>
+#include <numeric>
+#include <vector>
+
+#include "ceres/evaluator.h"
+#include "ceres/linear_solver.h"
+#include "ceres/minimizer.h"
+#include "ceres/parallel_for.h"
+#include "ceres/parameter_block.h"
+#include "ceres/parameter_block_ordering.h"
+#include "ceres/problem_impl.h"
+#include "ceres/program.h"
+#include "ceres/residual_block.h"
+#include "ceres/solver.h"
+#include "ceres/trust_region_minimizer.h"
+#include "ceres/trust_region_strategy.h"
+
+namespace ceres {
+namespace internal {
+
+using std::map;
+using std::max;
+using std::min;
+using std::set;
+using std::string;
+using std::vector;
+
+CoordinateDescentMinimizer::CoordinateDescentMinimizer(ContextImpl* context)
+    : context_(context) {
+  CHECK(context_ != nullptr);
+}
+
+CoordinateDescentMinimizer::~CoordinateDescentMinimizer() {
+}
+
+bool CoordinateDescentMinimizer::Init(
+    const Program& program,
+    const ProblemImpl::ParameterMap& parameter_map,
+    const ParameterBlockOrdering& ordering,
+    string* error) {
+  parameter_blocks_.clear();
+  independent_set_offsets_.clear();
+  independent_set_offsets_.push_back(0);
+
+  // Serialize the OrderedGroups into a vector of parameter block
+  // offsets for parallel access.
+  map<ParameterBlock*, int> parameter_block_index;
+  map<int, set<double*>> group_to_elements = ordering.group_to_elements();
+  for (const auto& g_t_e : group_to_elements) {
+    const auto& elements = g_t_e.second;
+    for (double* parameter_block: elements) {
+      parameter_blocks_.push_back(parameter_map.find(parameter_block)->second);
+      parameter_block_index[parameter_blocks_.back()] =
+          parameter_blocks_.size() - 1;
+    }
+    independent_set_offsets_.push_back(
+        independent_set_offsets_.back() + elements.size());
+  }
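+
+  // At this point independent set i occupies the half-open range
+  // [independent_set_offsets_[i], independent_set_offsets_[i + 1]) of
+  // parameter_blocks_.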
+
+  // The ordering does not have to contain all parameter blocks; the
+  // remaining parameter blocks are appended with empty independent
+  // sets (their offsets do not advance), so they are never optimized
+  // by the inner iterations.
+  const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
+  for (int i = 0; i < parameter_blocks.size(); ++i) {
+    if (!ordering.IsMember(parameter_blocks[i]->mutable_user_state())) {
+      parameter_blocks_.push_back(parameter_blocks[i]);
+      independent_set_offsets_.push_back(independent_set_offsets_.back());
+    }
+  }
+
+  // Compute the set of residual blocks that depend on each parameter
+  // block.
+  residual_blocks_.resize(parameter_block_index.size());
+  const vector<ResidualBlock*>& residual_blocks = program.residual_blocks();
+  for (int i = 0; i < residual_blocks.size(); ++i) {
+    ResidualBlock* residual_block = residual_blocks[i];
+    const int num_parameter_blocks = residual_block->NumParameterBlocks();
+    for (int j = 0; j < num_parameter_blocks; ++j) {
+      ParameterBlock* parameter_block = residual_block->parameter_blocks()[j];
+      const auto it = parameter_block_index.find(parameter_block);
+      if (it != parameter_block_index.end()) {
+        residual_blocks_[it->second].push_back(residual_block);
+      }
+    }
+  }
+
+  evaluator_options_.linear_solver_type = DENSE_QR;
+  evaluator_options_.num_eliminate_blocks = 0;
+  evaluator_options_.num_threads = 1;
+  evaluator_options_.context = context_;
+
+  return true;
+}
+
+void CoordinateDescentMinimizer::Minimize(
+    const Minimizer::Options& options,
+    double* parameters,
+    Solver::Summary* summary) {
+  // Set the state and mark all parameter blocks constant.
+  for (int i = 0; i < parameter_blocks_.size(); ++i) {
+    ParameterBlock* parameter_block = parameter_blocks_[i];
+    parameter_block->SetState(parameters + parameter_block->state_offset());
+    parameter_block->SetConstant();
+  }
+
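+  // Each inner iteration thread gets its own DENSE_QR linear solver;
+  // the solvers are created up front and destroyed once all
+  // independent sets have been processed.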
+  std::unique_ptr<LinearSolver*[]> linear_solvers(
+      new LinearSolver*[options.num_threads]);
+
+  LinearSolver::Options linear_solver_options;
+  linear_solver_options.type = DENSE_QR;
+  linear_solver_options.context = context_;
+
+  for (int i = 0; i < options.num_threads; ++i) {
+    linear_solvers[i] = LinearSolver::Create(linear_solver_options);
+  }
+
+  for (int i = 0; i < independent_set_offsets_.size() - 1; ++i) {
+    const int num_problems =
+        independent_set_offsets_[i + 1] - independent_set_offsets_[i];
+    // Avoid the overhead of a ParallelFor call if the independent set
+    // is empty.
+    if (num_problems == 0) {
+      continue;
+    }
+
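+    // Split the available threads between the subproblems solved in
+    // parallel and the evaluator used inside each subproblem.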
+    const int num_inner_iteration_threads =
+        min(options.num_threads, num_problems);
+    evaluator_options_.num_threads =
+        max(1, options.num_threads / num_inner_iteration_threads);
+
+    // The parameter blocks in each independent set can be optimized
+    // in parallel, since they do not co-occur in any residual block.
+    ParallelFor(
+        context_,
+        independent_set_offsets_[i],
+        independent_set_offsets_[i + 1],
+        num_inner_iteration_threads,
+        [&](int thread_id, int j) {
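+          // Temporarily re-index this parameter block as the sole
+          // block of a one-block program; the original index and
+          // delta offset are restored once the subproblem is solved.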
+          ParameterBlock* parameter_block = parameter_blocks_[j];
+          const int old_index = parameter_block->index();
+          const int old_delta_offset = parameter_block->delta_offset();
+          parameter_block->SetVarying();
+          parameter_block->set_index(0);
+          parameter_block->set_delta_offset(0);
+
+          Program inner_program;
+          inner_program.mutable_parameter_blocks()->push_back(parameter_block);
+          *inner_program.mutable_residual_blocks() = residual_blocks_[j];
+
+          // TODO(sameeragarwal): Better error handling. Right now we
+          // assume that this is not going to lead to problems of any
+          // sort. Basically we should be checking for numerical failure
+          // of some sort.
+          //
+          // On the other hand, if the optimization is a failure, that in
+          // some ways is fine, since it won't change the parameters and
+          // we are fine.
+          Solver::Summary inner_summary;
+          Solve(&inner_program,
+                linear_solvers[thread_id],
+                parameters + parameter_block->state_offset(),
+                &inner_summary);
+
+          parameter_block->set_index(old_index);
+          parameter_block->set_delta_offset(old_delta_offset);
+          parameter_block->SetState(parameters +
+                                    parameter_block->state_offset());
+          parameter_block->SetConstant();
+        });
+  }
+
+  for (int i = 0; i < parameter_blocks_.size(); ++i) {
+    parameter_blocks_[i]->SetVarying();
+  }
+
+  for (int i = 0; i < options.num_threads; ++i) {
+    delete linear_solvers[i];
+  }
+}
+
+// Solve the optimization problem for one parameter block.
+void CoordinateDescentMinimizer::Solve(Program* program,
+                                       LinearSolver* linear_solver,
+                                       double* parameter,
+                                       Solver::Summary* summary) {
+  *summary = Solver::Summary();
+  summary->initial_cost = 0.0;
+  summary->fixed_cost = 0.0;
+  summary->final_cost = 0.0;
+  string error;
+
+  Minimizer::Options minimizer_options;
+  minimizer_options.evaluator.reset(
+      Evaluator::Create(evaluator_options_, program, &error));
+  CHECK(minimizer_options.evaluator != nullptr);
+  minimizer_options.jacobian.reset(
+      minimizer_options.evaluator->CreateJacobian());
+  CHECK(minimizer_options.jacobian != nullptr);
+
+  TrustRegionStrategy::Options trs_options;
+  trs_options.linear_solver = linear_solver;
+  minimizer_options.trust_region_strategy.reset(
+      TrustRegionStrategy::Create(trs_options));
+  CHECK(minimizer_options.trust_region_strategy != nullptr);
+  minimizer_options.is_silent = true;
+
+  TrustRegionMinimizer minimizer;
+  minimizer.Minimize(minimizer_options, parameter, summary);
+}
+
+bool CoordinateDescentMinimizer::IsOrderingValid(
+    const Program& program,
+    const ParameterBlockOrdering& ordering,
+    string* message) {
+  const map<int, set<double*>>& group_to_elements =
+      ordering.group_to_elements();
+
+  // Verify that each group is an independent set
+  for (const auto& g_t_e : group_to_elements) {
+    if (!program.IsParameterBlockSetIndependent(g_t_e.second)) {
+      *message =
+          StringPrintf("The user-provided "
+                       "parameter_blocks_for_inner_iterations does not "
+                       "form an independent set. Group Id: %d", g_t_e.first);
+      return false;
+    }
+  }
+  return true;
+}
+
+// Find a recursive decomposition of the Hessian matrix as a set
+// of independent sets of decreasing size and reverse their order.
+// This seems to work better in practice, i.e., cameras before
+// points.
+ParameterBlockOrdering* CoordinateDescentMinimizer::CreateOrdering(
+    const Program& program) {
+  std::unique_ptr<ParameterBlockOrdering> ordering(new ParameterBlockOrdering);
+  ComputeRecursiveIndependentSetOrdering(program, ordering.get());
+  ordering->Reverse();
+  return ordering.release();
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/coordinate_descent_minimizer.h b/internal/ceres/coordinate_descent_minimizer.h
new file mode 100644
index 0000000..3bbcc2d
--- /dev/null
+++ b/internal/ceres/coordinate_descent_minimizer.h
@@ -0,0 +1,108 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_COORDINATE_DESCENT_MINIMIZER_H_
+#define CERES_INTERNAL_COORDINATE_DESCENT_MINIMIZER_H_
+
+#include <string>
+#include <vector>
+
+#include "ceres/context_impl.h"
+#include "ceres/evaluator.h"
+#include "ceres/minimizer.h"
+#include "ceres/problem_impl.h"
+#include "ceres/solver.h"
+
+namespace ceres {
+namespace internal {
+
+class Program;
+class LinearSolver;
+
+// Given a Program and a ParameterBlockOrdering which partitions
+// (non-exhaustively) the Hessian matrix into independent sets,
+// perform coordinate descent on the parameter blocks in the
+// ordering. The independent set structure allows for all parameter
+// blocks in the same independent set to be optimized in parallel, and
+// the order of the independent set determines the order in which the
+// parameter block groups are optimized.
+//
+// The minimizer assumes that none of the parameter blocks in the
+// program are constant.
+class CoordinateDescentMinimizer : public Minimizer {
+ public:
+  explicit CoordinateDescentMinimizer(ContextImpl* context);
+
+  bool Init(const Program& program,
+            const ProblemImpl::ParameterMap& parameter_map,
+            const ParameterBlockOrdering& ordering,
+            std::string* error);
+
+  // Minimizer interface.
+  virtual ~CoordinateDescentMinimizer();
+
+  virtual void Minimize(const Minimizer::Options& options,
+                        double* parameters,
+                        Solver::Summary* summary);
+
+  // Verify that each group in the ordering forms an independent set.
+  static bool IsOrderingValid(const Program& program,
+                              const ParameterBlockOrdering& ordering,
+                              std::string* message);
+
+  // Find a recursive decomposition of the Hessian matrix as a set
+  // of independent sets of decreasing size and reverse their order.
+  // This seems to work better in practice, i.e., cameras before
+  // points.
+  static ParameterBlockOrdering* CreateOrdering(const Program& program);
+
+ private:
+  void Solve(Program* program,
+             LinearSolver* linear_solver,
+             double* parameters,
+             Solver::Summary* summary);
+
+  std::vector<ParameterBlock*> parameter_blocks_;
+  std::vector<std::vector<ResidualBlock*>> residual_blocks_;
+  // The optimization is performed in rounds. In each round all the
+  // parameter blocks that form one independent set are optimized in
+  // parallel. This array marks the boundaries of the independent
+  // sets in parameter_blocks_.
+  std::vector<int> independent_set_offsets_;
+
+  Evaluator::Options evaluator_options_;
+
+  ContextImpl* context_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_COORDINATE_DESCENT_MINIMIZER_H_
diff --git a/internal/ceres/corrector.cc b/internal/ceres/corrector.cc
new file mode 100644
index 0000000..4ac0dc3
--- /dev/null
+++ b/internal/ceres/corrector.cc
@@ -0,0 +1,158 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/corrector.h"
+
+#include <cstddef>
+#include <cmath>
+#include "ceres/internal/eigen.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+Corrector::Corrector(const double sq_norm, const double rho[3]) {
+  CHECK_GE(sq_norm, 0.0);
+  sqrt_rho1_ = sqrt(rho[1]);
+
+  // If sq_norm = 0.0, the correction becomes trivial: the residual
+  // and the jacobian are scaled by the square root of the derivative
+  // of rho. Handling this case explicitly avoids the divide by zero
+  // error that would occur below.
+  //
+  // The case where rho'' < 0 also gets special handling. Technically
+  // it shouldn't, and the computation of the scaling should proceed
+  // as below; however, we found in experiments that applying the
+  // curvature correction when rho'' < 0, which is the case when we
+  // are in the outlier region, slows down the convergence of the
+  // algorithm significantly.
+  //
+  // Thus, we have divided the action of the robustifier into two
+  // parts. In the inlier region, we do the full second order
+  // correction, which re-weights the gradient of the function by the
+  // square root of the derivative of rho, and the Gauss-Newton
+  // Hessian gets both the scaling and the rank-1 curvature
+  // correction. Normally, alpha is upper bounded by one, but with this
+  // change, alpha is bounded above by zero.
+  //
+  // Empirically we have observed that the full Triggs correction and
+  // the clamped correction both start out as very good approximations
+  // to the loss function when we are in the convex part of the
+  // function, but as the function starts transitioning from convex to
+  // concave, the Triggs approximation diverges more and more and
+  // ultimately becomes linear. The clamped Triggs model however
+  // remains quadratic.
+  //
+  // The reason the Triggs approximation becomes so poor is that the
+  // curvature correction that it applies to the Gauss-Newton Hessian
+  // goes from being a full rank correction to a rank deficient
+  // correction, making the inversion of the Hessian fraught with all
+  // sorts of misery and suffering.
+  //
+  // The clamped correction retains its quadratic nature and the
+  // inversion of the resulting Hessian is always well behaved.
+  if ((sq_norm == 0.0) || (rho[2] <= 0.0)) {
+    residual_scaling_ = sqrt_rho1_;
+    alpha_sq_norm_ = 0.0;
+    return;
+  }
+
+  // We now require that the first derivative of the loss function be
+  // positive only if the second derivative is positive. This is
+  // because when the second derivative is non-positive, we do not use
+  // the second order correction suggested by BANS and instead use a
+  // simpler first order strategy which does not use a division by the
+  // gradient of the loss function.
+  CHECK_GT(rho[1], 0.0);
+
+  // Calculate the smaller of the two solutions to the equation
+  //
+  // 0.5 *  alpha^2 - alpha - rho'' / rho' *  z'z = 0.
+  //
+  // Start by calculating the discriminant D.
+  const double D = 1.0 + 2.0 * sq_norm * rho[2] / rho[1];
+
+  // Since both rho[1] and rho[2] are guaranteed to be positive at
+  // this point, we know that D > 1.0.
+
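+  // The two roots of the quadratic are 1 +/- sqrt(D); take the
+  // smaller one, which is negative since D > 1.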
+  const double alpha = 1.0 - sqrt(D);
+
+  // Calculate the constants needed by the correction routines.
+  residual_scaling_ = sqrt_rho1_ / (1 - alpha);
+  alpha_sq_norm_ = alpha / sq_norm;
+}
+
+void Corrector::CorrectResiduals(const int num_rows, double* residuals) {
+  DCHECK(residuals != NULL);
+  // Equation 11 in BANS.
+  VectorRef(residuals, num_rows) *= residual_scaling_;
+}
+
+void Corrector::CorrectJacobian(const int num_rows,
+                                const int num_cols,
+                                double* residuals,
+                                double* jacobian) {
+  DCHECK(residuals != NULL);
+  DCHECK(jacobian != NULL);
+
+  // The common case (rho[2] <= 0).
+  if (alpha_sq_norm_ == 0.0) {
+    VectorRef(jacobian, num_rows * num_cols) *= sqrt_rho1_;
+    return;
+  }
+
+  // Equation 11 in BANS.
+  //
+  //  J = sqrt(rho') * (J - alpha / sq_norm * r * r' J)
+  //
+  // In days gone by this loop used to be a single Eigen expression of
+  // the form
+  //
+  //  J = sqrt_rho1_ * (J - alpha_sq_norm_ * r * (r.transpose() * J));
+  //
+  // which turns out to be about 17x slower on BAL problems. The reason
+  // is that Eigen is unable to figure out that this expression can be
+  // evaluated columnwise and ends up creating a temporary.
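+  // Instead, compute r' J one column at a time and apply the scaling
+  // and the rank-1 correction to that column in place.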
+  for (int c = 0; c < num_cols; ++c) {
+    double r_transpose_j = 0.0;
+    for (int r = 0; r < num_rows; ++r) {
+      r_transpose_j += jacobian[r * num_cols + c] * residuals[r];
+    }
+
+    for (int r = 0; r < num_rows; ++r) {
+      jacobian[r * num_cols + c] = sqrt_rho1_ *
+          (jacobian[r * num_cols + c] -
+           alpha_sq_norm_ * residuals[r] * r_transpose_j);
+    }
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/corrector.h b/internal/ceres/corrector.h
new file mode 100644
index 0000000..a5b03dd
--- /dev/null
+++ b/internal/ceres/corrector.h
@@ -0,0 +1,90 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Class definition for the object that is responsible for applying a
+// second order correction to the Gauss-Newton based on the ideas in
+// BANS by Triggs et al.
+
+#ifndef CERES_INTERNAL_CORRECTOR_H_
+#define CERES_INTERNAL_CORRECTOR_H_
+
+namespace ceres {
+namespace internal {
+
+// Corrector is responsible for applying the second order correction
+// to the residual and jacobian of a least squares problem based on a
+// radial robust loss.
+//
+// The key idea here is to look at the expressions for the robustified
+// Gauss-Newton approximation and then take its square root to get the
+// corresponding corrections to the residual and jacobian.  For the
+// full expressions see Eq. 10 and 11 in BANS by Triggs et al.
+class Corrector {
+ public:
+  // The constructor takes the squared norm, the value, the first and
+  // second derivatives of the LossFunction. It precalculates some of
+  // the constants that are needed to apply the correction. The
+  // correction constant alpha is constrained to be smaller than 1; if
+  // it were to become larger than 1, it would reverse the sign of the
+  // residual and the correction, and alpha equal to 1 would result in
+  // a divide by zero error. Thus alpha is constrained to be upper
+  // bounded by 1 - epsilon_.
+  //
+  // rho[1] needs to be positive. The constructor will crash if this
+  // condition is not met.
+  //
+  // In practical use CorrectJacobian should always be called before
+  // CorrectResiduals, because the jacobian correction depends on the
+  // values of the uncorrected residuals.
+  explicit Corrector(double sq_norm, const double rho[3]);
+
+  // residuals *= sqrt(rho[1]) / (1 - alpha)
+  void CorrectResiduals(int num_rows, double* residuals);
+
+  // jacobian = sqrt(rho[1]) * jacobian -
+  // sqrt(rho[1]) * alpha / sq_norm * residuals * residuals' * jacobian.
+  //
+  // The method assumes that the jacobian has row-major storage. It is
+  // the caller's responsibility to ensure that the pointer to
+  // jacobian is not null.
+  void CorrectJacobian(int num_rows,
+                       int num_cols,
+                       double* residuals,
+                       double* jacobian);
+
+ private:
+  double sqrt_rho1_;
+  double residual_scaling_;
+  double alpha_sq_norm_;
+};
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_CORRECTOR_H_
diff --git a/internal/ceres/corrector_test.cc b/internal/ceres/corrector_test.cc
new file mode 100644
index 0000000..a6581fd
--- /dev/null
+++ b/internal/ceres/corrector_test.cc
@@ -0,0 +1,276 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/corrector.h"
+
+#include <algorithm>
+#include <cmath>
+#include <cstring>
+#include <cstdlib>
+#include "gtest/gtest.h"
+#include "ceres/random.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+// If rho[1] is zero, the Corrector constructor should crash.
+TEST(Corrector, ZeroGradientDeathTest) {
+  const double kRho[] = {0.0, 0.0, 1.0};
+  EXPECT_DEATH_IF_SUPPORTED({Corrector c(1.0, kRho);}, ".*");
+}
+
+// If rho[1] is negative, the Corrector constructor should crash.
+TEST(Corrector, NegativeGradientDeathTest) {
+  const double kRho[] = {0.0, -0.1, 1.0};
+  EXPECT_DEATH_IF_SUPPORTED({Corrector c(1.0, kRho);}, ".*");
+}
+
+TEST(Corrector, ScalarCorrection) {
+  double residuals = sqrt(3.0);
+  double jacobian = 10.0;
+  double sq_norm = residuals * residuals;
+
+  const double kRho[] = {sq_norm, 0.1, -0.01};
+
+  // In light of the rho'' < 0 clamping now implemented in
+  // corrector.cc, alpha = 0 whenever rho'' < 0.
+  const double kAlpha = 0.0;
+
+  // Thus the expected value of the residual is
+  // residual[i] * sqrt(kRho[1]) / (1.0 - kAlpha).
+  const double kExpectedResidual =
+      residuals * sqrt(kRho[1]) / (1 - kAlpha);
+
+  // The jacobian in this case will be
+  // sqrt(kRho[1]) * (1 - kAlpha) * jacobian.
+  const double kExpectedJacobian = sqrt(kRho[1]) * (1 - kAlpha) * jacobian;
+
+  Corrector c(sq_norm, kRho);
+  c.CorrectJacobian(1, 1, &residuals, &jacobian);
+  c.CorrectResiduals(1, &residuals);
+
+  ASSERT_NEAR(residuals, kExpectedResidual, 1e-6);
+  ASSERT_NEAR(kExpectedJacobian, jacobian, 1e-6);
+}
+
+TEST(Corrector, ScalarCorrectionZeroResidual) {
+  double residuals = 0.0;
+  double jacobian = 10.0;
+  double sq_norm = residuals * residuals;
+
+  const double kRho[] = {0.0, 0.1, -0.01};
+  Corrector c(sq_norm, kRho);
+
+  // The alpha equation is
+  // 1/2 alpha^2 - alpha + 0.0 = 0.
+  // i.e. alpha = 1.0 - sqrt(1.0).
+  //      alpha = 0.0.
+  // Thus the expected value of the residual is
+  // residual[i] * sqrt(kRho[1])
+  const double kExpectedResidual = residuals * sqrt(kRho[1]);
+
+  // The jacobian in this case will be
+  // sqrt(kRho[1]) * jacobian.
+  const double kExpectedJacobian = sqrt(kRho[1]) * jacobian;
+
+  c.CorrectJacobian(1, 1, &residuals, &jacobian);
+  c.CorrectResiduals(1, &residuals);
+
+  ASSERT_NEAR(residuals, kExpectedResidual, 1e-6);
+  ASSERT_NEAR(kExpectedJacobian, jacobian, 1e-6);
+}
+
+// Scaling behaviour for one dimensional functions.
+TEST(Corrector, ScalarCorrectionAlphaClamped) {
+  double residuals = sqrt(3.0);
+  double jacobian = 10.0;
+  double sq_norm = residuals * residuals;
+
+  const double kRho[] = {3, 0.1, -0.1};
+
+  // rho[2] < 0 -> alpha = 0.0
+  const double kAlpha = 0.0;
+
+  // Thus the expected value of the residual is
+  // residual[i] * sqrt(kRho[1]) / (1.0 - kAlpha).
+  const double kExpectedResidual =
+      residuals * sqrt(kRho[1]) / (1.0 - kAlpha);
+
+  // The jacobian in this case will be scaled by
+  // sqrt(rho[1]) * (1 - alpha) * J.
+  const double kExpectedJacobian = sqrt(kRho[1]) *
+      (1.0 - kAlpha) * jacobian;
+
+  Corrector c(sq_norm, kRho);
+  c.CorrectJacobian(1, 1, &residuals, &jacobian);
+  c.CorrectResiduals(1, &residuals);
+
+  ASSERT_NEAR(residuals, kExpectedResidual, 1e-6);
+  ASSERT_NEAR(kExpectedJacobian, jacobian, 1e-6);
+}
+
+// Test that the corrected multidimensional residuals and jacobian
+// match the expected values and that the resulting modified normal
+// equations match the robustified Gauss-Newton approximation.
+TEST(Corrector, MultidimensionalGaussNewtonApproximation) {
+  double residuals[3];
+  double jacobian[2 * 3];
+  double rho[3];
+
+  // Eigen matrix references for linear algebra.
+  MatrixRef jac(jacobian, 3, 2);
+  VectorRef res(residuals, 3);
+
+  // Ground truth values of the modified jacobian and residuals.
+  Matrix g_jac(3, 2);
+  Vector g_res(3);
+
+  // Ground truth values of the robustified Gauss-Newton
+  // approximation.
+  Matrix g_hess(2, 2);
+  Vector g_grad(2);
+
+  // Corrected hessian and gradient implied by the modified jacobian
+  // and residuals.
+  Matrix c_hess(2, 2);
+  Vector c_grad(2);
+
+  srand(5);
+  for (int iter = 0; iter < 10000; ++iter) {
+    // Initialize the jacobian and residual.
+    for (int i = 0; i < 2 * 3; ++i)
+      jacobian[i] = RandDouble();
+    for (int i = 0; i < 3; ++i)
+      residuals[i] = RandDouble();
+
+    const double sq_norm = res.dot(res);
+
+    rho[0] = sq_norm;
+    rho[1] = RandDouble();
+    rho[2] = 2.0 * RandDouble() - 1.0;
+
+    // If rho[2] > 0, then the curvature corrected Gauss-Newton
+    // approximation will match the robustified approximation.
+    // Otherwise, alpha is clamped to 0.
+
+    const double kD = 1 + 2 * rho[2] / rho[1] * sq_norm;
+    const double kAlpha = (rho[2] > 0.0) ? 1 - sqrt(kD) : 0.0;
+
+    // Ground truth values.
+    g_res = sqrt(rho[1]) / (1.0 - kAlpha) * res;
+    g_jac = sqrt(rho[1]) * (jac - kAlpha / sq_norm *
+                            res * res.transpose() * jac);
+
+    g_grad = rho[1] * jac.transpose() * res;
+    g_hess = rho[1] * jac.transpose() * jac +
+        2.0 * rho[2] * jac.transpose() * res * res.transpose() * jac;
+
+    Corrector c(sq_norm, rho);
+    c.CorrectJacobian(3, 2, residuals, jacobian);
+    c.CorrectResiduals(3, residuals);
+
+    // Corrected gradient and hessian.
+    c_grad  = jac.transpose() * res;
+    c_hess = jac.transpose() * jac;
+
+    ASSERT_NEAR((g_res - res).norm(), 0.0, 1e-10);
+    ASSERT_NEAR((g_jac - jac).norm(), 0.0, 1e-10);
+
+    ASSERT_NEAR((g_grad - c_grad).norm(), 0.0, 1e-10);
+  }
+}
+
+TEST(Corrector, MultidimensionalGaussNewtonApproximationZeroResidual) {
+  double residuals[3];
+  double jacobian[2 * 3];
+  double rho[3];
+
+  // Eigen matrix references for linear algebra.
+  MatrixRef jac(jacobian, 3, 2);
+  VectorRef res(residuals, 3);
+
+  // Ground truth values of the modified jacobian and residuals.
+  Matrix g_jac(3, 2);
+  Vector g_res(3);
+
+  // Ground truth values of the robustified Gauss-Newton
+  // approximation.
+  Matrix g_hess(2, 2);
+  Vector g_grad(2);
+
+  // Corrected hessian and gradient implied by the modified jacobian
+  // and residuals.
+  Matrix c_hess(2, 2);
+  Vector c_grad(2);
+
+  srand(5);
+  for (int iter = 0; iter < 10000; ++iter) {
+    // Initialize the jacobian.
+    for (int i = 0; i < 2 * 3; ++i)
+      jacobian[i] = RandDouble();
+
+    // Zero residuals
+    res.setZero();
+
+    const double sq_norm = res.dot(res);
+
+    rho[0] = sq_norm;
+    rho[1] = RandDouble();
+    rho[2] = 2 * RandDouble() - 1.0;
+
+    // Ground truth values.
+    g_res = sqrt(rho[1]) * res;
+    g_jac = sqrt(rho[1]) * jac;
+
+    g_grad = rho[1] * jac.transpose() * res;
+    g_hess = rho[1] * jac.transpose() * jac +
+        2.0 * rho[2] * jac.transpose() * res * res.transpose() * jac;
+
+    Corrector c(sq_norm, rho);
+    c.CorrectJacobian(3, 2, residuals, jacobian);
+    c.CorrectResiduals(3, residuals);
+
+    // Corrected gradient and hessian.
+    c_grad = jac.transpose() * res;
+    c_hess = jac.transpose() * jac;
+
+    ASSERT_NEAR((g_res - res).norm(), 0.0, 1e-10);
+    ASSERT_NEAR((g_jac - jac).norm(), 0.0, 1e-10);
+
+    ASSERT_NEAR((g_grad - c_grad).norm(), 0.0, 1e-10);
+    ASSERT_NEAR((g_hess - c_hess).norm(), 0.0, 1e-10);
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/cost_function_to_functor_test.cc b/internal/ceres/cost_function_to_functor_test.cc
new file mode 100644
index 0000000..52687ea
--- /dev/null
+++ b/internal/ceres/cost_function_to_functor_test.cc
@@ -0,0 +1,354 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/cost_function_to_functor.h"
+
+#include <cstdint>
+#include <memory>
+#include "ceres/dynamic_autodiff_cost_function.h"
+#include "ceres/dynamic_cost_function_to_functor.h"
+#include "ceres/autodiff_cost_function.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+using std::vector;
+const double kTolerance = 1e-18;
+
+void ExpectCostFunctionsAreEqual(const CostFunction& cost_function,
+                                 const CostFunction& actual_cost_function) {
+  EXPECT_EQ(cost_function.num_residuals(),
+            actual_cost_function.num_residuals());
+  const int num_residuals = cost_function.num_residuals();
+  const vector<int32_t>& parameter_block_sizes =
+      cost_function.parameter_block_sizes();
+  const vector<int32_t>& actual_parameter_block_sizes =
+      actual_cost_function.parameter_block_sizes();
+  EXPECT_EQ(parameter_block_sizes.size(),
+            actual_parameter_block_sizes.size());
+
+  int num_parameters = 0;
+  for (int i = 0; i < parameter_block_sizes.size(); ++i) {
+    EXPECT_EQ(parameter_block_sizes[i], actual_parameter_block_sizes[i]);
+    num_parameters += parameter_block_sizes[i];
+  }
+
+  std::unique_ptr<double[]> parameters(new double[num_parameters]);
+  for (int i = 0; i < num_parameters; ++i) {
+    parameters[i] = static_cast<double>(i) + 1.0;
+  }
+
+  std::unique_ptr<double[]> residuals(new double[num_residuals]);
+  std::unique_ptr<double[]> jacobians(new double[num_parameters * num_residuals]);
+
+  std::unique_ptr<double[]> actual_residuals(new double[num_residuals]);
+  std::unique_ptr<double[]> actual_jacobians(
+      new double[num_parameters * num_residuals]);
+
+  std::unique_ptr<double*[]> parameter_blocks(
+      new double*[parameter_block_sizes.size()]);
+  std::unique_ptr<double*[]> jacobian_blocks(
+      new double*[parameter_block_sizes.size()]);
+  std::unique_ptr<double*[]> actual_jacobian_blocks(
+      new double*[parameter_block_sizes.size()]);
+
+  num_parameters = 0;
+  for (int i = 0; i < parameter_block_sizes.size(); ++i) {
+    parameter_blocks[i] = parameters.get() + num_parameters;
+    jacobian_blocks[i] = jacobians.get() + num_parameters * num_residuals;
+    actual_jacobian_blocks[i] =
+        actual_jacobians.get() + num_parameters * num_residuals;
+    num_parameters += parameter_block_sizes[i];
+  }
+
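+  // Evaluate both cost functions without jacobians and compare the
+  // residuals.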
+  EXPECT_TRUE(cost_function.Evaluate(parameter_blocks.get(),
+                                     residuals.get(), NULL));
+  EXPECT_TRUE(actual_cost_function.Evaluate(parameter_blocks.get(),
+                                            actual_residuals.get(), NULL));
+  for (int i = 0; i < num_residuals; ++i) {
+    EXPECT_NEAR(residuals[i], actual_residuals[i], kTolerance)
+        << "residual id: " << i;
+  }
+
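+  // Evaluate again, this time with jacobians, and compare both the
+  // residuals and the jacobians.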
+  EXPECT_TRUE(cost_function.Evaluate(parameter_blocks.get(),
+                                     residuals.get(),
+                                     jacobian_blocks.get()));
+  EXPECT_TRUE(actual_cost_function.Evaluate(parameter_blocks.get(),
+                                            actual_residuals.get(),
+                                            actual_jacobian_blocks.get()));
+  for (int i = 0; i < num_residuals; ++i) {
+    EXPECT_NEAR(residuals[i], actual_residuals[i], kTolerance)
+        << "residual : " << i;
+  }
+
+  for (int i = 0; i < num_residuals * num_parameters; ++i) {
+    EXPECT_NEAR(jacobians[i], actual_jacobians[i], kTolerance)
+        << "jacobian : " << i << " "
+        << jacobians[i] << " " << actual_jacobians[i];
+  }
+}
+
+struct OneParameterBlockFunctor {
+ public:
+  template <typename T>
+  bool operator()(const T* x1, T* residuals) const {
+    residuals[0] = x1[0] * x1[0];
+    residuals[1] = x1[1] * x1[1];
+    return true;
+  }
+};
+
+struct TwoParameterBlockFunctor {
+ public:
+  template <typename T>
+  bool operator()(const T* x1, const T* x2, T* residuals) const {
+    residuals[0] = x1[0] * x1[0]  + x2[0] * x2[0];
+    residuals[1] = x1[1] * x1[1]  + x2[1] * x2[1];
+    return true;
+  }
+};
+
+struct ThreeParameterBlockFunctor {
+ public:
+  template <typename T>
+  bool operator()(const T* x1, const T* x2, const T* x3, T* residuals) const {
+    residuals[0] = x1[0] * x1[0]  + x2[0] * x2[0] + x3[0] * x3[0];
+    residuals[1] = x1[1] * x1[1]  + x2[1] * x2[1] + x3[1] * x3[1];
+    return true;
+  }
+};
+
+struct FourParameterBlockFunctor {
+ public:
+  template <typename T>
+  bool operator()(const T* x1, const T* x2, const T* x3, const T* x4,
+                  T* residuals) const {
+    residuals[0] = x1[0] * x1[0]  + x2[0] * x2[0] + x3[0] * x3[0]
+        + x4[0] * x4[0];
+    residuals[1] = x1[1] * x1[1]  + x2[1] * x2[1] + x3[1] * x3[1]
+        + x4[1] * x4[1];
+    return true;
+  }
+};
+
+struct FiveParameterBlockFunctor {
+ public:
+  template <typename T>
+  bool operator()(const T* x1, const T* x2, const T* x3, const T* x4,
+                  const T* x5, T* residuals) const {
+    residuals[0] = x1[0] * x1[0]  + x2[0] * x2[0] + x3[0] * x3[0]
+        + x4[0] * x4[0] + x5[0] * x5[0];
+    residuals[1] = x1[1] * x1[1]  + x2[1] * x2[1] + x3[1] * x3[1]
+        + x4[1] * x4[1] + x5[1] * x5[1];
+    return true;
+  }
+};
+
+struct SixParameterBlockFunctor {
+ public:
+  template <typename T>
+  bool operator()(const T* x1, const T* x2, const T* x3, const T* x4,
+                  const T* x5, const T* x6,  T* residuals) const {
+    residuals[0] = x1[0] * x1[0]  + x2[0] * x2[0] + x3[0] * x3[0]
+        + x4[0] * x4[0] + x5[0] * x5[0] + x6[0] * x6[0];
+    residuals[1] = x1[1] * x1[1]  + x2[1] * x2[1] + x3[1] * x3[1]
+        + x4[1] * x4[1] + x5[1] * x5[1] + x6[1] * x6[1];
+    return true;
+  }
+};
+
+struct SevenParameterBlockFunctor {
+ public:
+  template <typename T>
+  bool operator()(const T* x1, const T* x2, const T* x3, const T* x4,
+                  const T* x5, const T* x6, const T* x7, T* residuals) const {
+    residuals[0] = x1[0] * x1[0]  + x2[0] * x2[0] + x3[0] * x3[0]
+        + x4[0] * x4[0] + x5[0] * x5[0] + x6[0] * x6[0] + x7[0] * x7[0];
+    residuals[1] = x1[1] * x1[1]  + x2[1] * x2[1] + x3[1] * x3[1]
+        + x4[1] * x4[1] + x5[1] * x5[1] + x6[1] * x6[1] + x7[1] * x7[1];
+    return true;
+  }
+};
+
+struct EightParameterBlockFunctor {
+ public:
+  template <typename T>
+  bool operator()(const T* x1, const T* x2, const T* x3, const T* x4,
+                  const T* x5, const T* x6, const T* x7, const T* x8,
+                  T* residuals) const {
+    residuals[0] = x1[0] * x1[0]  + x2[0] * x2[0] + x3[0] * x3[0]
+        + x4[0] * x4[0] + x5[0] * x5[0] + x6[0] * x6[0] + x7[0] * x7[0]
+        + x8[0] * x8[0];
+    residuals[1] = x1[1] * x1[1]  + x2[1] * x2[1] + x3[1] * x3[1]
+        + x4[1] * x4[1] + x5[1] * x5[1] + x6[1] * x6[1] + x7[1] * x7[1]
+        + x8[1] * x8[1];
+    return true;
+  }
+};
+
+struct NineParameterBlockFunctor {
+ public:
+  template <typename T>
+  bool operator()(const T* x1, const T* x2, const T* x3, const T* x4,
+                  const T* x5, const T* x6, const T* x7, const T* x8,
+                  const T* x9, T* residuals) const {
+    residuals[0] = x1[0] * x1[0]  + x2[0] * x2[0] + x3[0] * x3[0]
+        + x4[0] * x4[0] + x5[0] * x5[0] + x6[0] * x6[0] + x7[0] * x7[0]
+        + x8[0] * x8[0] + x9[0] * x9[0];
+    residuals[1] = x1[1] * x1[1]  + x2[1] * x2[1] + x3[1] * x3[1]
+        + x4[1] * x4[1] + x5[1] * x5[1] + x6[1] * x6[1] + x7[1] * x7[1]
+        + x8[1] * x8[1] + x9[1] * x9[1];
+    return true;
+  }
+};
+
+struct TenParameterBlockFunctor {
+ public:
+  template <typename T>
+  bool operator()(const T* x1, const T* x2, const T* x3, const T* x4,
+                  const T* x5, const T* x6, const T* x7, const T* x8,
+                  const T* x9, const T* x10, T* residuals) const {
+    residuals[0] = x1[0] * x1[0]  + x2[0] * x2[0] + x3[0] * x3[0]
+        + x4[0] * x4[0] + x5[0] * x5[0] + x6[0] * x6[0] + x7[0] * x7[0]
+        + x8[0] * x8[0] + x9[0] * x9[0] + x10[0] * x10[0];
+    residuals[1] = x1[1] * x1[1]  + x2[1] * x2[1] + x3[1] * x3[1]
+        + x4[1] * x4[1] + x5[1] * x5[1] + x6[1] * x6[1] + x7[1] * x7[1]
+        + x8[1] * x8[1] + x9[1] * x9[1] + x10[1] * x10[1];
+    return true;
+  }
+};
+
+class DynamicTwoParameterBlockFunctor {
+ public:
+  template <typename T>
+  bool operator()(T const* const* parameters, T* residuals) const {
+    for (int i = 0; i < 2; ++i) {
+      residuals[0] = parameters[i][0] * parameters[i][0];
+      residuals[1] = parameters[i][1] * parameters[i][1];
+    }
+    return true;
+  }
+};
+
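+// TEST_BODY(NAME) wraps NAME##Functor twice: once directly in an
+// AutoDiffCostFunction, and once inside a CostFunctionToFunctor which
+// is itself differentiated by an AutoDiffCostFunction. The two cost
+// functions must produce identical residuals and jacobians.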
+#define TEST_BODY(NAME)                                                 \
+  TEST(CostFunctionToFunctor, NAME) {                                   \
+    std::unique_ptr<CostFunction> cost_function(                             \
+        new AutoDiffCostFunction<                                       \
+            CostFunctionToFunctor<2, PARAMETER_BLOCK_SIZES >,           \
+                2, PARAMETER_BLOCK_SIZES>(new CostFunctionToFunctor<    \
+                    2, PARAMETER_BLOCK_SIZES >(                         \
+                        new AutoDiffCostFunction<                       \
+                            NAME##Functor, 2, PARAMETER_BLOCK_SIZES >(  \
+                  new NAME##Functor))));                                \
+                                                                        \
+    std::unique_ptr<CostFunction> actual_cost_function(                     \
+        new AutoDiffCostFunction<NAME##Functor, 2, PARAMETER_BLOCK_SIZES >( \
+            new NAME##Functor));                                            \
+    ExpectCostFunctionsAreEqual(*cost_function, *actual_cost_function);     \
+}
+
+#define PARAMETER_BLOCK_SIZES 2
+TEST_BODY(OneParameterBlock)
+#undef PARAMETER_BLOCK_SIZES
+
+#define PARAMETER_BLOCK_SIZES 2,2
+TEST_BODY(TwoParameterBlock)
+#undef PARAMETER_BLOCK_SIZES
+
+#define PARAMETER_BLOCK_SIZES 2,2,2
+TEST_BODY(ThreeParameterBlock)
+#undef PARAMETER_BLOCK_SIZES
+
+#define PARAMETER_BLOCK_SIZES 2,2,2,2
+TEST_BODY(FourParameterBlock)
+#undef PARAMETER_BLOCK_SIZES
+
+#define PARAMETER_BLOCK_SIZES 2,2,2,2,2
+TEST_BODY(FiveParameterBlock)
+#undef PARAMETER_BLOCK_SIZES
+
+#define PARAMETER_BLOCK_SIZES 2,2,2,2,2,2
+TEST_BODY(SixParameterBlock)
+#undef PARAMETER_BLOCK_SIZES
+
+#define PARAMETER_BLOCK_SIZES 2,2,2,2,2,2,2
+TEST_BODY(SevenParameterBlock)
+#undef PARAMETER_BLOCK_SIZES
+
+#define PARAMETER_BLOCK_SIZES 2,2,2,2,2,2,2,2
+TEST_BODY(EightParameterBlock)
+#undef PARAMETER_BLOCK_SIZES
+
+#define PARAMETER_BLOCK_SIZES 2,2,2,2,2,2,2,2,2
+TEST_BODY(NineParameterBlock)
+#undef PARAMETER_BLOCK_SIZES
+
+#define PARAMETER_BLOCK_SIZES 2,2,2,2,2,2,2,2,2,2
+TEST_BODY(TenParameterBlock)
+#undef PARAMETER_BLOCK_SIZES
+
+#undef TEST_BODY
+
+TEST(CostFunctionToFunctor, DynamicNumberOfResiduals) {
+  std::unique_ptr<CostFunction> cost_function(
+      new AutoDiffCostFunction<
+      CostFunctionToFunctor<ceres::DYNAMIC, 2, 2 >, ceres::DYNAMIC, 2, 2>(
+          new CostFunctionToFunctor<ceres::DYNAMIC, 2, 2 >(
+              new AutoDiffCostFunction<TwoParameterBlockFunctor, 2, 2, 2 >(
+                  new TwoParameterBlockFunctor)), 2));
+
+  std::unique_ptr<CostFunction> actual_cost_function(
+      new AutoDiffCostFunction<TwoParameterBlockFunctor, 2, 2, 2 >(
+          new TwoParameterBlockFunctor));
+  ExpectCostFunctionsAreEqual(*cost_function, *actual_cost_function);
+}
+
+TEST(CostFunctionToFunctor, DynamicCostFunctionToFunctor) {
+  DynamicAutoDiffCostFunction<DynamicTwoParameterBlockFunctor>*
+      actual_cost_function(
+      new DynamicAutoDiffCostFunction<DynamicTwoParameterBlockFunctor>(
+          new DynamicTwoParameterBlockFunctor));
+  actual_cost_function->AddParameterBlock(2);
+  actual_cost_function->AddParameterBlock(2);
+  actual_cost_function->SetNumResiduals(2);
+
+  DynamicAutoDiffCostFunction<DynamicCostFunctionToFunctor> cost_function(
+      new DynamicCostFunctionToFunctor(actual_cost_function));
+  cost_function.AddParameterBlock(2);
+  cost_function.AddParameterBlock(2);
+  cost_function.SetNumResiduals(2);
+
+  ExpectCostFunctionsAreEqual(cost_function, *actual_cost_function);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/covariance.cc b/internal/ceres/covariance.cc
new file mode 100644
index 0000000..068cd9c
--- /dev/null
+++ b/internal/ceres/covariance.cc
@@ -0,0 +1,99 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/covariance.h"
+
+#include <utility>
+#include <vector>
+#include "ceres/covariance_impl.h"
+#include "ceres/problem.h"
+#include "ceres/problem_impl.h"
+
+namespace ceres {
+
+using std::make_pair;
+using std::pair;
+using std::vector;
+
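+// Covariance is a thin public wrapper; all the work is delegated to
+// internal::CovarianceImpl.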
+Covariance::Covariance(const Covariance::Options& options) {
+  impl_.reset(new internal::CovarianceImpl(options));
+}
+
+Covariance::~Covariance() {
+}
+
+bool Covariance::Compute(
+    const vector<pair<const double*, const double*>>& covariance_blocks,
+    Problem* problem) {
+  return impl_->Compute(covariance_blocks, problem->problem_impl_.get());
+}
+
+bool Covariance::Compute(
+    const vector<const double*>& parameter_blocks,
+    Problem* problem) {
+  return impl_->Compute(parameter_blocks, problem->problem_impl_.get());
+}
+
+bool Covariance::GetCovarianceBlock(const double* parameter_block1,
+                                    const double* parameter_block2,
+                                    double* covariance_block) const {
+  return impl_->GetCovarianceBlockInTangentOrAmbientSpace(parameter_block1,
+                                                          parameter_block2,
+                                                          true,  // ambient
+                                                          covariance_block);
+}
+
+bool Covariance::GetCovarianceBlockInTangentSpace(
+    const double* parameter_block1,
+    const double* parameter_block2,
+    double* covariance_block) const {
+  return impl_->GetCovarianceBlockInTangentOrAmbientSpace(parameter_block1,
+                                                          parameter_block2,
+                                                          false,  // tangent
+                                                          covariance_block);
+}
+
+bool Covariance::GetCovarianceMatrix(
+    const vector<const double*>& parameter_blocks,
+    double* covariance_matrix) {
+  return impl_->GetCovarianceMatrixInTangentOrAmbientSpace(parameter_blocks,
+                                                           true,  // ambient
+                                                           covariance_matrix);
+}
+
+bool Covariance::GetCovarianceMatrixInTangentSpace(
+    const std::vector<const double*>& parameter_blocks,
+    double* covariance_matrix) {
+  return impl_->GetCovarianceMatrixInTangentOrAmbientSpace(parameter_blocks,
+                                                           false,  // tangent
+                                                           covariance_matrix);
+}
+
+}  // namespace ceres
diff --git a/internal/ceres/covariance_impl.cc b/internal/ceres/covariance_impl.cc
new file mode 100644
index 0000000..d24f5c9
--- /dev/null
+++ b/internal/ceres/covariance_impl.cc
@@ -0,0 +1,913 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/covariance_impl.h"
+
+#include <algorithm>
+#include <cstdlib>
+#include <memory>
+#include <numeric>
+#include <sstream>
+#include <unordered_set>
+#include <utility>
+#include <vector>
+
+#include "Eigen/SparseCore"
+#include "Eigen/SparseQR"
+#include "Eigen/SVD"
+
+#include "ceres/compressed_col_sparse_matrix_utils.h"
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/covariance.h"
+#include "ceres/crs_matrix.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/map_util.h"
+#include "ceres/parallel_for.h"
+#include "ceres/parallel_utils.h"
+#include "ceres/parameter_block.h"
+#include "ceres/problem_impl.h"
+#include "ceres/residual_block.h"
+#include "ceres/suitesparse.h"
+#include "ceres/wall_time.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+using std::make_pair;
+using std::map;
+using std::pair;
+using std::sort;
+using std::swap;
+using std::vector;
+
+typedef vector<pair<const double*, const double*>> CovarianceBlocks;
+
+CovarianceImpl::CovarianceImpl(const Covariance::Options& options)
+    : options_(options),
+      is_computed_(false),
+      is_valid_(false) {
+#ifdef CERES_NO_THREADS
+  if (options_.num_threads > 1) {
+    LOG(WARNING)
+        << "No threading support is compiled into this binary; "
+        << "only options.num_threads = 1 is supported. Switching "
+        << "to single threaded mode.";
+    options_.num_threads = 1;
+  }
+#endif
+
+  evaluate_options_.num_threads = options_.num_threads;
+  evaluate_options_.apply_loss_function = options_.apply_loss_function;
+}
+
+CovarianceImpl::~CovarianceImpl() {
+}
+
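+// Check the list of blocks for duplicates. If any are found, LOG(FATAL)
+// with the indices at which the duplicates occur. The vector is taken
+// by value because it is sorted in place.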
+template <typename T> void CheckForDuplicates(vector<T> blocks) {
+  sort(blocks.begin(), blocks.end());
+  typename vector<T>::iterator it =
+      std::adjacent_find(blocks.begin(), blocks.end());
+  if (it != blocks.end()) {
+    // In case there are duplicates, we search for their location.
+    map<T, vector<int>> blocks_map;
+    for (int i = 0; i < blocks.size(); ++i) {
+      blocks_map[blocks[i]].push_back(i);
+    }
+
+    std::ostringstream duplicates;
+    while (it != blocks.end()) {
+      duplicates << "(";
+      for (int i = 0; i < blocks_map[*it].size() - 1; ++i) {
+        duplicates << blocks_map[*it][i] << ", ";
+      }
+      duplicates << blocks_map[*it].back() << ")";
+      it = std::adjacent_find(it + 1, blocks.end());
+      if (it < blocks.end()) {
+        duplicates << " and ";
+      }
+    }
+
+    LOG(FATAL) << "Covariance::Compute called with duplicate blocks at "
+               << "indices " << duplicates.str();
+  }
+}
+
+bool CovarianceImpl::Compute(const CovarianceBlocks& covariance_blocks,
+                             ProblemImpl* problem) {
+  CheckForDuplicates<pair<const double*, const double*>>(covariance_blocks);
+  problem_ = problem;
+  parameter_block_to_row_index_.clear();
+  covariance_matrix_.reset(NULL);
+  is_valid_ = (ComputeCovarianceSparsity(covariance_blocks, problem) &&
+               ComputeCovarianceValues());
+  is_computed_ = true;
+  return is_valid_;
+}
+
+bool CovarianceImpl::Compute(const vector<const double*>& parameter_blocks,
+                             ProblemImpl* problem) {
+  CheckForDuplicates<const double*>(parameter_blocks);
+  CovarianceBlocks covariance_blocks;
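+  // Request every upper triangular block pair (i, j) with j >= i; for n
+  // parameter blocks this produces n * (n + 1) / 2 covariance blocks.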
+  for (int i = 0; i < parameter_blocks.size(); ++i) {
+    for (int j = i; j < parameter_blocks.size(); ++j) {
+      covariance_blocks.push_back(make_pair(parameter_blocks[i],
+                                            parameter_blocks[j]));
+    }
+  }
+
+  return Compute(covariance_blocks, problem);
+}
+
+bool CovarianceImpl::GetCovarianceBlockInTangentOrAmbientSpace(
+    const double* original_parameter_block1,
+    const double* original_parameter_block2,
+    bool lift_covariance_to_ambient_space,
+    double* covariance_block) const {
+  CHECK(is_computed_)
+      << "Covariance::GetCovarianceBlock called before Covariance::Compute";
+  CHECK(is_valid_)
+      << "Covariance::GetCovarianceBlock called when Covariance::Compute "
+      << "returned false.";
+
+  // If either of the two parameter blocks is constant, then the
+  // covariance block is also zero.
+  if (constant_parameter_blocks_.count(original_parameter_block1) > 0 ||
+      constant_parameter_blocks_.count(original_parameter_block2) > 0) {
+    const ProblemImpl::ParameterMap& parameter_map = problem_->parameter_map();
+    ParameterBlock* block1 =
+        FindOrDie(parameter_map,
+                  const_cast<double*>(original_parameter_block1));
+
+    ParameterBlock* block2 =
+        FindOrDie(parameter_map,
+                  const_cast<double*>(original_parameter_block2));
+
+    const int block1_size = block1->Size();
+    const int block2_size = block2->Size();
+    const int block1_local_size = block1->LocalSize();
+    const int block2_local_size = block2->LocalSize();
+    if (!lift_covariance_to_ambient_space) {
+      MatrixRef(covariance_block, block1_local_size, block2_local_size)
+          .setZero();
+    } else {
+      MatrixRef(covariance_block, block1_size, block2_size).setZero();
+    }
+    return true;
+  }
+
+  const double* parameter_block1 = original_parameter_block1;
+  const double* parameter_block2 = original_parameter_block2;
+  const bool transpose = parameter_block1 > parameter_block2;
+  if (transpose) {
+    swap(parameter_block1, parameter_block2);
+  }
+
+  // Find where in the covariance matrix the block is located.
+  const int row_begin =
+      FindOrDie(parameter_block_to_row_index_, parameter_block1);
+  const int col_begin =
+      FindOrDie(parameter_block_to_row_index_, parameter_block2);
+  const int* rows = covariance_matrix_->rows();
+  const int* cols = covariance_matrix_->cols();
+  const int row_size = rows[row_begin + 1] - rows[row_begin];
+  const int* cols_begin = cols + rows[row_begin];
+
+  // The only part that requires work is walking the compressed column
+  // vector to determine where the set of columns corresponding to the
+  // covariance block begins.
+  int offset = 0;
+  while (offset < row_size && cols_begin[offset] != col_begin) {
+    ++offset;
+  }
+
+  if (offset == row_size) {
+    LOG(ERROR) << "Unable to find covariance block for "
+               << original_parameter_block1 << " "
+               << original_parameter_block2;
+    return false;
+  }
+
+  const ProblemImpl::ParameterMap& parameter_map = problem_->parameter_map();
+  ParameterBlock* block1 =
+      FindOrDie(parameter_map, const_cast<double*>(parameter_block1));
+  ParameterBlock* block2 =
+      FindOrDie(parameter_map, const_cast<double*>(parameter_block2));
+  const LocalParameterization* local_param1 = block1->local_parameterization();
+  const LocalParameterization* local_param2 = block2->local_parameterization();
+  const int block1_size = block1->Size();
+  const int block1_local_size = block1->LocalSize();
+  const int block2_size = block2->Size();
+  const int block2_local_size = block2->LocalSize();
+
+  ConstMatrixRef cov(covariance_matrix_->values() + rows[row_begin],
+                     block1_size,
+                     row_size);
+
+  // Fast path when there are no local parameterizations or if the
+  // user does not want it lifted to the ambient space.
+  if ((local_param1 == NULL && local_param2 == NULL) ||
+      !lift_covariance_to_ambient_space) {
+    if (transpose) {
+      MatrixRef(covariance_block, block2_local_size, block1_local_size) =
+          cov.block(0, offset, block1_local_size,
+                    block2_local_size).transpose();
+    } else {
+      MatrixRef(covariance_block, block1_local_size, block2_local_size) =
+          cov.block(0, offset, block1_local_size, block2_local_size);
+    }
+    return true;
+  }
+
+  // If local parameterizations are used then the covariance that has
+  // been computed is in the tangent space and it needs to be lifted
+  // back to the ambient space.
+  //
+  // This is given by the formula
+  //
+  //  C'_12 = J_1 C_12 J_2'
+  //
+  // Where C_12 is the local tangent space covariance for parameter
+  // blocks 1 and 2. J_1 and J_2 are respectively the local to global
+  // jacobians for parameter blocks 1 and 2.
+  //
+  // See Result 5.11 on page 142 of Hartley & Zisserman (2nd Edition)
+  // for a proof.
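+  //
+  // For example (illustrative only): if parameter block 1 were a unit
+  // quaternion with Size() = 4 and LocalSize() = 3, then J_1 is 4 x 3
+  // and the 3 x 3 tangent space block C_11 lifts to the 4 x 4 ambient
+  // space block J_1 C_11 J_1'.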
+  //
+  // TODO(sameeragarwal): Add caching of local parameterization, so
+  // that they are computed just once per parameter block.
+  Matrix block1_jacobian(block1_size, block1_local_size);
+  if (local_param1 == NULL) {
+    block1_jacobian.setIdentity();
+  } else {
+    local_param1->ComputeJacobian(parameter_block1, block1_jacobian.data());
+  }
+
+  Matrix block2_jacobian(block2_size, block2_local_size);
+  // Fast path if the user is requesting a diagonal block.
+  if (parameter_block1 == parameter_block2) {
+    block2_jacobian = block1_jacobian;
+  } else {
+    if (local_param2 == NULL) {
+      block2_jacobian.setIdentity();
+    } else {
+      local_param2->ComputeJacobian(parameter_block2, block2_jacobian.data());
+    }
+  }
+
+  if (transpose) {
+    MatrixRef(covariance_block, block2_size, block1_size) =
+        block2_jacobian *
+        cov.block(0, offset, block1_local_size, block2_local_size).transpose() *
+        block1_jacobian.transpose();
+  } else {
+    MatrixRef(covariance_block, block1_size, block2_size) =
+        block1_jacobian *
+        cov.block(0, offset, block1_local_size, block2_local_size) *
+        block2_jacobian.transpose();
+  }
+
+  return true;
+}
+
+bool CovarianceImpl::GetCovarianceMatrixInTangentOrAmbientSpace(
+    const vector<const double*>& parameters,
+    bool lift_covariance_to_ambient_space,
+    double* covariance_matrix) const {
+  CHECK(is_computed_)
+      << "Covariance::GetCovarianceMatrix called before Covariance::Compute";
+  CHECK(is_valid_)
+      << "Covariance::GetCovarianceMatrix called when Covariance::Compute "
+      << "returned false.";
+
+  const ProblemImpl::ParameterMap& parameter_map = problem_->parameter_map();
+  // For OpenMP compatibility we need to define these vectors in advance.
+  const int num_parameters = parameters.size();
+  vector<int> parameter_sizes;
+  vector<int> cum_parameter_size;
+  parameter_sizes.reserve(num_parameters);
+  cum_parameter_size.resize(num_parameters + 1);
+  cum_parameter_size[0] = 0;
+  for (int i = 0; i < num_parameters; ++i) {
+    ParameterBlock* block =
+        FindOrDie(parameter_map, const_cast<double*>(parameters[i]));
+    if (lift_covariance_to_ambient_space) {
+      parameter_sizes.push_back(block->Size());
+    } else {
+      parameter_sizes.push_back(block->LocalSize());
+    }
+  }
+  std::partial_sum(parameter_sizes.begin(), parameter_sizes.end(),
+                   cum_parameter_size.begin() + 1);
+  const int max_covariance_block_size =
+      *std::max_element(parameter_sizes.begin(), parameter_sizes.end());
+  const int covariance_size = cum_parameter_size.back();
+
+  // Assemble the blocks in the covariance matrix.
+  MatrixRef covariance(covariance_matrix, covariance_size, covariance_size);
+  const int num_threads = options_.num_threads;
+  std::unique_ptr<double[]> workspace(
+      new double[num_threads * max_covariance_block_size *
+                 max_covariance_block_size]);
+
+  bool success = true;
+
+  // Technically the following code is a doubly nested loop over pairs
+  // (i, j) with 0 <= i <= j < num_parameters, flattened into a single
+  // parallel loop of n * (n + 1) / 2 iterations.
+  int iteration_count = (num_parameters * (num_parameters + 1)) / 2;
+  problem_->context()->EnsureMinimumThreads(num_threads);
+  ParallelFor(
+      problem_->context(),
+      0,
+      iteration_count,
+      num_threads,
+      [&](int thread_id, int k) {
+        int i, j;
+        LinearIndexToUpperTriangularIndex(k, num_parameters, &i, &j);
+
+        int covariance_row_idx = cum_parameter_size[i];
+        int covariance_col_idx = cum_parameter_size[j];
+        int size_i = parameter_sizes[i];
+        int size_j = parameter_sizes[j];
+        double* covariance_block =
+            workspace.get() + thread_id * max_covariance_block_size *
+            max_covariance_block_size;
+        if (!GetCovarianceBlockInTangentOrAmbientSpace(
+                parameters[i], parameters[j],
+                lift_covariance_to_ambient_space, covariance_block)) {
+          success = false;
+        }
+
+        covariance.block(covariance_row_idx, covariance_col_idx, size_i,
+                         size_j) = MatrixRef(covariance_block, size_i, size_j);
+
+        if (i != j) {
+          covariance.block(covariance_col_idx, covariance_row_idx,
+                           size_j, size_i) =
+              MatrixRef(covariance_block, size_i, size_j).transpose();
+        }
+      });
+  return success;
+}
+
+// Determine the sparsity pattern of the covariance matrix based on
+// the block pairs requested by the user.
+bool CovarianceImpl::ComputeCovarianceSparsity(
+    const CovarianceBlocks& original_covariance_blocks,
+    ProblemImpl* problem) {
+  EventLogger event_logger("CovarianceImpl::ComputeCovarianceSparsity");
+
+  // Determine an ordering for the parameter blocks by sorting them by
+  // their pointers.
+  vector<double*> all_parameter_blocks;
+  problem->GetParameterBlocks(&all_parameter_blocks);
+  const ProblemImpl::ParameterMap& parameter_map = problem->parameter_map();
+  std::unordered_set<ParameterBlock*> parameter_blocks_in_use;
+  vector<ResidualBlock*> residual_blocks;
+  problem->GetResidualBlocks(&residual_blocks);
+
+  for (int i = 0; i < residual_blocks.size(); ++i) {
+    ResidualBlock* residual_block = residual_blocks[i];
+    parameter_blocks_in_use.insert(residual_block->parameter_blocks(),
+                                   residual_block->parameter_blocks() +
+                                   residual_block->NumParameterBlocks());
+  }
+
+  constant_parameter_blocks_.clear();
+  vector<double*>& active_parameter_blocks =
+      evaluate_options_.parameter_blocks;
+  active_parameter_blocks.clear();
+  for (int i = 0; i < all_parameter_blocks.size(); ++i) {
+    double* parameter_block = all_parameter_blocks[i];
+    ParameterBlock* block = FindOrDie(parameter_map, parameter_block);
+    if (!block->IsConstant() && (parameter_blocks_in_use.count(block) > 0)) {
+      active_parameter_blocks.push_back(parameter_block);
+    } else {
+      constant_parameter_blocks_.insert(parameter_block);
+    }
+  }
+
+  std::sort(active_parameter_blocks.begin(), active_parameter_blocks.end());
+
+  // Compute the number of rows.  Map each parameter block to the
+  // first row corresponding to it in the covariance matrix using the
+  // ordering of parameter blocks just constructed.
+  int num_rows = 0;
+  parameter_block_to_row_index_.clear();
+  for (int i = 0; i < active_parameter_blocks.size(); ++i) {
+    double* parameter_block = active_parameter_blocks[i];
+    const int parameter_block_size =
+        problem->ParameterBlockLocalSize(parameter_block);
+    parameter_block_to_row_index_[parameter_block] = num_rows;
+    num_rows += parameter_block_size;
+  }
+
+  // Compute the number of non-zeros in the covariance matrix.  Along
+  // the way flip any covariance blocks which are in the lower
+  // triangular part of the matrix.
+  int num_nonzeros = 0;
+  CovarianceBlocks covariance_blocks;
+  for (int i = 0; i < original_covariance_blocks.size(); ++i) {
+    const pair<const double*, const double*>& block_pair =
+        original_covariance_blocks[i];
+    if (constant_parameter_blocks_.count(block_pair.first) > 0 ||
+        constant_parameter_blocks_.count(block_pair.second) > 0) {
+      continue;
+    }
+
+    int index1 = FindOrDie(parameter_block_to_row_index_, block_pair.first);
+    int index2 = FindOrDie(parameter_block_to_row_index_, block_pair.second);
+    const int size1 = problem->ParameterBlockLocalSize(block_pair.first);
+    const int size2 = problem->ParameterBlockLocalSize(block_pair.second);
+    num_nonzeros += size1 * size2;
+
+    // Make sure we are constructing a block upper triangular matrix.
+    if (index1 > index2) {
+      covariance_blocks.push_back(make_pair(block_pair.second,
+                                            block_pair.first));
+    } else {
+      covariance_blocks.push_back(block_pair);
+    }
+  }
+
+  if (covariance_blocks.size() == 0) {
+    VLOG(2) << "No non-zero covariance blocks found";
+    covariance_matrix_.reset(NULL);
+    return true;
+  }
+
+  // Sort the block pairs. As a consequence we get the covariance
+  // blocks as they will occur in the CompressedRowSparseMatrix that
+  // will store the covariance.
+  sort(covariance_blocks.begin(), covariance_blocks.end());
+
+  // Fill the sparsity pattern of the covariance matrix.
+  covariance_matrix_.reset(
+      new CompressedRowSparseMatrix(num_rows, num_rows, num_nonzeros));
+
+  int* rows = covariance_matrix_->mutable_rows();
+  int* cols = covariance_matrix_->mutable_cols();
+
+  // Iterate over parameter blocks and in turn over the rows of the
+  // covariance matrix. For each parameter block, look in the upper
+  // triangular part of the covariance matrix to see if there are any
+  // blocks requested by the user. If this is the case then fill out a
+  // set of compressed rows corresponding to this parameter block.
+  //
+  // The key thing that makes this loop work is the fact that the
+  // row/columns of the covariance matrix are ordered by the pointer
+  // values of the parameter blocks. Thus iterating over the keys of
+  // parameter_block_to_row_index_ corresponds to iterating over the
+  // rows of the covariance matrix in order.
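+  //
+  // For example, if a row block of local size 2 is paired with column
+  // blocks of local sizes 3 and 1, each of its 2 compressed rows
+  // receives the same 4 column indices and cursor advances by 8 in
+  // total.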
+  int i = 0;  // index into covariance_blocks.
+  int cursor = 0;  // index into the covariance matrix.
+  for (const auto& entry : parameter_block_to_row_index_) {
+    const double* row_block = entry.first;
+    const int row_block_size = problem->ParameterBlockLocalSize(row_block);
+    int row_begin = entry.second;
+
+    // Iterate over the covariance blocks contained in this row block
+    // and count the number of columns in this row block.
+    int num_col_blocks = 0;
+    int num_columns = 0;
+    for (int j = i; j < covariance_blocks.size(); ++j, ++num_col_blocks) {
+      const pair<const double*, const double*>& block_pair =
+          covariance_blocks[j];
+      if (block_pair.first != row_block) {
+        break;
+      }
+      num_columns += problem->ParameterBlockLocalSize(block_pair.second);
+    }
+
+    // Fill out all the compressed rows for this parameter block.
+    for (int r = 0; r < row_block_size; ++r) {
+      rows[row_begin + r] = cursor;
+      for (int c = 0; c < num_col_blocks; ++c) {
+        const double* col_block = covariance_blocks[i + c].second;
+        const int col_block_size = problem->ParameterBlockLocalSize(col_block);
+        int col_begin = FindOrDie(parameter_block_to_row_index_, col_block);
+        for (int k = 0; k < col_block_size; ++k) {
+          cols[cursor++] = col_begin++;
+        }
+      }
+    }
+
+    i += num_col_blocks;
+  }
+
+  rows[num_rows] = cursor;
+  return true;
+}
+
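+// Dispatches to the algorithm selected in Covariance::Options:
+// DENSE_SVD, or SPARSE_QR backed by either Eigen's SparseQR or
+// SuiteSparseQR, depending on sparse_linear_algebra_library_type.
+// Unsupported combinations are logged and reported as failures.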
+bool CovarianceImpl::ComputeCovarianceValues() {
+  if (options_.algorithm_type == DENSE_SVD) {
+    return ComputeCovarianceValuesUsingDenseSVD();
+  }
+
+  if (options_.algorithm_type == SPARSE_QR) {
+    if (options_.sparse_linear_algebra_library_type == EIGEN_SPARSE) {
+      return ComputeCovarianceValuesUsingEigenSparseQR();
+    }
+
+    if (options_.sparse_linear_algebra_library_type == SUITE_SPARSE) {
+#if !defined(CERES_NO_SUITESPARSE)
+      return ComputeCovarianceValuesUsingSuiteSparseQR();
+#else
+      LOG(ERROR) << "SuiteSparse is required to use the SPARSE_QR algorithm "
+                 << "with "
+                 << "Covariance::Options::sparse_linear_algebra_library_type "
+                 << "= SUITE_SPARSE.";
+      return false;
+#endif
+    }
+
+    LOG(ERROR) << "Unsupported "
+               << "Covariance::Options::sparse_linear_algebra_library_type "
+               << "= "
+               << SparseLinearAlgebraLibraryTypeToString(
+                      options_.sparse_linear_algebra_library_type);
+    return false;
+  }
+
+  LOG(ERROR) << "Unsupported Covariance::Options::algorithm_type = "
+             << CovarianceAlgorithmTypeToString(options_.algorithm_type);
+  return false;
+}
+
+bool CovarianceImpl::ComputeCovarianceValuesUsingSuiteSparseQR() {
+  EventLogger event_logger(
+      "CovarianceImpl::ComputeCovarianceValuesUsingSuiteSparseQR");
+
+#ifndef CERES_NO_SUITESPARSE
+  if (covariance_matrix_.get() == NULL) {
+    // Nothing to do, all zeros covariance matrix.
+    return true;
+  }
+
+  CRSMatrix jacobian;
+  problem_->Evaluate(evaluate_options_, NULL, NULL, NULL, &jacobian);
+  event_logger.AddEvent("Evaluate");
+
+  // Construct a compressed column form of the Jacobian.
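+  //
+  // This is done in three passes over the CRS jacobian: count the
+  // entries in each column, turn the counts into column offsets via a
+  // running sum, and then scatter the row indices and values into
+  // place. The final loop below shifts the advanced offsets down by one
+  // position to restore the column pointers.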
+  const int num_rows = jacobian.num_rows;
+  const int num_cols = jacobian.num_cols;
+  const int num_nonzeros = jacobian.values.size();
+
+  vector<SuiteSparse_long> transpose_rows(num_cols + 1, 0);
+  vector<SuiteSparse_long> transpose_cols(num_nonzeros, 0);
+  vector<double> transpose_values(num_nonzeros, 0);
+
+  for (int idx = 0; idx < num_nonzeros; ++idx) {
+    transpose_rows[jacobian.cols[idx] + 1] += 1;
+  }
+
+  for (int i = 1; i < transpose_rows.size(); ++i) {
+    transpose_rows[i] += transpose_rows[i - 1];
+  }
+
+  for (int r = 0; r < num_rows; ++r) {
+    for (int idx = jacobian.rows[r]; idx < jacobian.rows[r + 1]; ++idx) {
+      const int c = jacobian.cols[idx];
+      const int transpose_idx = transpose_rows[c];
+      transpose_cols[transpose_idx] = r;
+      transpose_values[transpose_idx] = jacobian.values[idx];
+      ++transpose_rows[c];
+    }
+  }
+
+  for (int i = transpose_rows.size() - 1; i > 0; --i) {
+    transpose_rows[i] = transpose_rows[i - 1];
+  }
+  transpose_rows[0] = 0;
+
+  cholmod_sparse cholmod_jacobian;
+  cholmod_jacobian.nrow = num_rows;
+  cholmod_jacobian.ncol = num_cols;
+  cholmod_jacobian.nzmax = num_nonzeros;
+  cholmod_jacobian.nz = NULL;
+  cholmod_jacobian.p = reinterpret_cast<void*>(&transpose_rows[0]);
+  cholmod_jacobian.i = reinterpret_cast<void*>(&transpose_cols[0]);
+  cholmod_jacobian.x = reinterpret_cast<void*>(&transpose_values[0]);
+  cholmod_jacobian.z = NULL;
+  cholmod_jacobian.stype = 0;  // Matrix is not symmetric.
+  cholmod_jacobian.itype = CHOLMOD_LONG;
+  cholmod_jacobian.xtype = CHOLMOD_REAL;
+  cholmod_jacobian.dtype = CHOLMOD_DOUBLE;
+  cholmod_jacobian.sorted = 1;
+  cholmod_jacobian.packed = 1;
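+  // Note that cholmod_jacobian is only a view into the transpose_*
+  // vectors above; no data is copied, so those vectors must remain
+  // alive until the factorization below has been computed.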
+
+  cholmod_common cc;
+  cholmod_l_start(&cc);
+
+  cholmod_sparse* R = NULL;
+  SuiteSparse_long* permutation = NULL;
+
+  // Compute a Q-less QR factorization of the Jacobian. Since we are
+  // only interested in inverting J'J = R'R, we do not need Q. This
+  // saves memory and gives us R as a permuted compressed column
+  // sparse matrix.
+  //
+  // TODO(sameeragarwal): Currently the symbolic factorization and the
+  // numeric factorization are done at the same time, and this does not
+  // explicitly account for the block column and row structure in the
+  // matrix. When using AMD, we have observed in the past that
+  // computing the ordering with the block matrix is significantly
+  // more efficient, both in runtime and in the quality of the ordering
+  // computed. So it may be worth doing that analysis separately.
+  const SuiteSparse_long rank =
+      SuiteSparseQR<double>(SPQR_ORDERING_BESTAMD,
+                            SPQR_DEFAULT_TOL,
+                            cholmod_jacobian.ncol,
+                            &cholmod_jacobian,
+                            &R,
+                            &permutation,
+                            &cc);
+  event_logger.AddEvent("Numeric Factorization");
+  CHECK(R != nullptr);
+
+  if (rank < cholmod_jacobian.ncol) {
+    LOG(ERROR) << "Jacobian matrix is rank deficient. "
+               << "Number of columns: " << cholmod_jacobian.ncol
+               << " rank: " << rank;
+    free(permutation);
+    cholmod_l_free_sparse(&R, &cc);
+    cholmod_l_finish(&cc);
+    return false;
+  }
+
+  vector<int> inverse_permutation(num_cols);
+  if (permutation) {
+    for (SuiteSparse_long i = 0; i < num_cols; ++i) {
+      inverse_permutation[permutation[i]] = i;
+    }
+  } else {
+    for (SuiteSparse_long i = 0; i < num_cols; ++i) {
+      inverse_permutation[i] = i;
+    }
+  }
+
+  const int* rows = covariance_matrix_->rows();
+  const int* cols = covariance_matrix_->cols();
+  double* values = covariance_matrix_->mutable_values();
+
+  // The following loop exploits the fact that the i^th column of A^{-1}
+  // is given by the solution to the linear system
+  //
+  //  A x = e_i
+  //
+  // where e_i is a vector with e(i) = 1 and all other entries zero.
+  //
+  // Since the covariance matrix is symmetric, the i^th row and column
+  // are equal.
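+  //
+  // Concretely, the Q-less QR factorization gives J'J = R'R (up to the
+  // column permutation), so each requested column c of the covariance
+  // is obtained by solving R'R x = e_c using the triangular factor R;
+  // only the entries of x present in the stored sparsity pattern are
+  // copied out.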
+  const int num_threads = options_.num_threads;
+  std::unique_ptr<double[]> workspace(new double[num_threads * num_cols]);
+
+  problem_->context()->EnsureMinimumThreads(num_threads);
+  ParallelFor(
+      problem_->context(),
+      0,
+      num_cols,
+      num_threads,
+      [&](int thread_id, int r) {
+        const int row_begin = rows[r];
+        const int row_end = rows[r + 1];
+        if (row_end != row_begin) {
+          double* solution = workspace.get() + thread_id * num_cols;
+          SolveRTRWithSparseRHS<SuiteSparse_long>(
+              num_cols, static_cast<SuiteSparse_long*>(R->i),
+              static_cast<SuiteSparse_long*>(R->p), static_cast<double*>(R->x),
+              inverse_permutation[r], solution);
+          for (int idx = row_begin; idx < row_end; ++idx) {
+            const int c = cols[idx];
+            values[idx] = solution[inverse_permutation[c]];
+          }
+        }
+      });
+
+  free(permutation);
+  cholmod_l_free_sparse(&R, &cc);
+  cholmod_l_finish(&cc);
+  event_logger.AddEvent("Inversion");
+  return true;
+
+#else  // CERES_NO_SUITESPARSE
+
+  return false;
+
+#endif  // CERES_NO_SUITESPARSE
+}
+
+bool CovarianceImpl::ComputeCovarianceValuesUsingDenseSVD() {
+  EventLogger event_logger(
+      "CovarianceImpl::ComputeCovarianceValuesUsingDenseSVD");
+  if (covariance_matrix_.get() == NULL) {
+    // Nothing to do, all zeros covariance matrix.
+    return true;
+  }
+
+  CRSMatrix jacobian;
+  problem_->Evaluate(evaluate_options_, NULL, NULL, NULL, &jacobian);
+  event_logger.AddEvent("Evaluate");
+
+  Matrix dense_jacobian(jacobian.num_rows, jacobian.num_cols);
+  dense_jacobian.setZero();
+  for (int r = 0; r < jacobian.num_rows; ++r) {
+    for (int idx = jacobian.rows[r]; idx < jacobian.rows[r + 1]; ++idx) {
+      const int c = jacobian.cols[idx];
+      dense_jacobian(r, c) = jacobian.values[idx];
+    }
+  }
+  event_logger.AddEvent("ConvertToDenseMatrix");
+
+  Eigen::JacobiSVD<Matrix> svd(dense_jacobian,
+                               Eigen::ComputeThinU | Eigen::ComputeThinV);
+
+  event_logger.AddEvent("SingularValueDecomposition");
+
+  const Vector singular_values = svd.singularValues();
+  const int num_singular_values = singular_values.rows();
+  Vector inverse_squared_singular_values(num_singular_values);
+  inverse_squared_singular_values.setZero();
+
+  const double max_singular_value = singular_values[0];
+  const double min_singular_value_ratio =
+      sqrt(options_.min_reciprocal_condition_number);
+
+  const bool automatic_truncation = (options_.null_space_rank < 0);
+  const int max_rank = std::min(num_singular_values,
+                                num_singular_values - options_.null_space_rank);
+
+  // Compute the squared inverse of the singular values. Truncate the
+  // computation based on min_singular_value_ratio and
+  // null_space_rank. When either of these two quantities are active,
+  // the resulting covariance matrix is a Moore-Penrose inverse
+  // instead of a regular inverse.
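+  //
+  // For example, with null_space_rank = 2 the two smallest singular
+  // values are always treated as zero, whereas with null_space_rank = -1
+  // truncation happens automatically once singular_values[i] /
+  // singular_values[0] drops below sqrt(min_reciprocal_condition_number).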
+  for (int i = 0; i < max_rank; ++i) {
+    const double singular_value_ratio = singular_values[i] / max_singular_value;
+    if (singular_value_ratio < min_singular_value_ratio) {
+      // Since the singular values are in decreasing order, if
+      // automatic truncation is enabled, then from this point on
+      // all values will fail the ratio test and there is nothing to
+      // do in this loop.
+      if (automatic_truncation) {
+        break;
+      } else {
+        LOG(ERROR) << "Error: Covariance matrix is near rank deficient "
+                   << "and the user did not specify a non-zero"
+                   << "Covariance::Options::null_space_rank "
+                   << "to enable the computation of a Pseudo-Inverse. "
+                   << "Reciprocal condition number: "
+                   << singular_value_ratio * singular_value_ratio << " "
+                   << "min_reciprocal_condition_number: "
+                   << options_.min_reciprocal_condition_number;
+        return false;
+      }
+    }
+
+    inverse_squared_singular_values[i] =
+        1.0 / (singular_values[i] * singular_values[i]);
+  }
+
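+  // Since J = U * S * V', we have J'J = V * S^2 * V', and its
+  // (pseudo-)inverse is V * pinv(S^2) * V'. The product below computes
+  // exactly this using the (possibly truncated) inverse squared
+  // singular values.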
+  Matrix dense_covariance =
+      svd.matrixV() *
+      inverse_squared_singular_values.asDiagonal() *
+      svd.matrixV().transpose();
+  event_logger.AddEvent("PseudoInverse");
+
+  const int num_rows = covariance_matrix_->num_rows();
+  const int* rows = covariance_matrix_->rows();
+  const int* cols = covariance_matrix_->cols();
+  double* values = covariance_matrix_->mutable_values();
+
+  for (int r = 0; r < num_rows; ++r) {
+    for (int idx = rows[r]; idx < rows[r + 1]; ++idx) {
+      const int c = cols[idx];
+      values[idx] = dense_covariance(r, c);
+    }
+  }
+  event_logger.AddEvent("CopyToCovarianceMatrix");
+  return true;
+}
+
+bool CovarianceImpl::ComputeCovarianceValuesUsingEigenSparseQR() {
+  EventLogger event_logger(
+      "CovarianceImpl::ComputeCovarianceValuesUsingEigenSparseQR");
+  if (covariance_matrix_.get() == NULL) {
+    // Nothing to do, all zeros covariance matrix.
+    return true;
+  }
+
+  CRSMatrix jacobian;
+  problem_->Evaluate(evaluate_options_, NULL, NULL, NULL, &jacobian);
+  event_logger.AddEvent("Evaluate");
+
+  typedef Eigen::SparseMatrix<double, Eigen::ColMajor> EigenSparseMatrix;
+
+  // Convert the matrix to column major order as required by SparseQR.
+  EigenSparseMatrix sparse_jacobian =
+      Eigen::MappedSparseMatrix<double, Eigen::RowMajor>(
+          jacobian.num_rows, jacobian.num_cols,
+          static_cast<int>(jacobian.values.size()),
+          jacobian.rows.data(), jacobian.cols.data(), jacobian.values.data());
+  event_logger.AddEvent("ConvertToSparseMatrix");
+
+  Eigen::SparseQR<EigenSparseMatrix, Eigen::COLAMDOrdering<int>>
+      qr_solver(sparse_jacobian);
+  event_logger.AddEvent("QRDecomposition");
+
+  if (qr_solver.info() != Eigen::Success) {
+    LOG(ERROR) << "Eigen::SparseQR decomposition failed.";
+    return false;
+  }
+
+  if (qr_solver.rank() < jacobian.num_cols) {
+    LOG(ERROR) << "Jacobian matrix is rank deficient. "
+               << "Number of columns: " << jacobian.num_cols
+               << " rank: " << qr_solver.rank();
+    return false;
+  }
+
+  const int* rows = covariance_matrix_->rows();
+  const int* cols = covariance_matrix_->cols();
+  double* values = covariance_matrix_->mutable_values();
+
+  // Compute the inverse column permutation used by QR factorization.
+  Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic> inverse_permutation =
+      qr_solver.colsPermutation().inverse();
+
+  // The following loop exploits the fact that the i^th column of A^{-1}
+  // is given by the solution to the linear system
+  //
+  //  A x = e_i
+  //
+  // where e_i is a vector with e(i) = 1 and all other entries zero.
+  //
+  // Since the covariance matrix is symmetric, the i^th row and column
+  // are equal.
+  const int num_cols = jacobian.num_cols;
+  const int num_threads = options_.num_threads;
+  std::unique_ptr<double[]> workspace(new double[num_threads * num_cols]);
+
+  problem_->context()->EnsureMinimumThreads(num_threads);
+  ParallelFor(
+      problem_->context(),
+      0,
+      num_cols,
+      num_threads,
+      [&](int thread_id, int r) {
+        const int row_begin = rows[r];
+        const int row_end = rows[r + 1];
+        if (row_end != row_begin) {
+          double* solution = workspace.get() + thread_id * num_cols;
+          SolveRTRWithSparseRHS<int>(
+              num_cols,
+              qr_solver.matrixR().innerIndexPtr(),
+              qr_solver.matrixR().outerIndexPtr(),
+              &qr_solver.matrixR().data().value(0),
+              inverse_permutation.indices().coeff(r),
+              solution);
+
+          // Assign the values of the computed covariance using the
+          // inverse permutation used in the QR factorization.
+          for (int idx = row_begin; idx < row_end; ++idx) {
+            const int c = cols[idx];
+            values[idx] = solution[inverse_permutation.indices().coeff(c)];
+          }
+        }
+      });
+
+  event_logger.AddEvent("Inverse");
+
+  return true;
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/covariance_impl.h b/internal/ceres/covariance_impl.h
new file mode 100644
index 0000000..065e43c
--- /dev/null
+++ b/internal/ceres/covariance_impl.h
@@ -0,0 +1,101 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_COVARIANCE_IMPL_H_
+#define CERES_INTERNAL_COVARIANCE_IMPL_H_
+
+#include <map>
+#include <memory>
+#include <set>
+#include <utility>
+#include <vector>
+#include "ceres/covariance.h"
+#include "ceres/problem_impl.h"
+#include "ceres/suitesparse.h"
+
+namespace ceres {
+namespace internal {
+
+class CompressedRowSparseMatrix;
+
+class CovarianceImpl {
+ public:
+  explicit CovarianceImpl(const Covariance::Options& options);
+  ~CovarianceImpl();
+
+  bool Compute(
+      const std::vector<std::pair<const double*,
+                                  const double*>>& covariance_blocks,
+      ProblemImpl* problem);
+
+  bool Compute(
+      const std::vector<const double*>& parameter_blocks,
+      ProblemImpl* problem);
+
+  bool GetCovarianceBlockInTangentOrAmbientSpace(
+      const double* parameter_block1,
+      const double* parameter_block2,
+      bool lift_covariance_to_ambient_space,
+      double* covariance_block) const;
+
+  bool GetCovarianceMatrixInTangentOrAmbientSpace(
+      const std::vector<const double*>& parameters,
+      bool lift_covariance_to_ambient_space,
+      double *covariance_matrix) const;
+
+  bool ComputeCovarianceSparsity(
+      const std::vector<std::pair<const double*,
+                                  const double*>>& covariance_blocks,
+      ProblemImpl* problem);
+
+  bool ComputeCovarianceValues();
+  bool ComputeCovarianceValuesUsingDenseSVD();
+  bool ComputeCovarianceValuesUsingSuiteSparseQR();
+  bool ComputeCovarianceValuesUsingEigenSparseQR();
+
+  const CompressedRowSparseMatrix* covariance_matrix() const {
+    return covariance_matrix_.get();
+  }
+
+ private:
+  ProblemImpl* problem_;
+  Covariance::Options options_;
+  Problem::EvaluateOptions evaluate_options_;
+  bool is_computed_;
+  bool is_valid_;
+  std::map<const double*, int> parameter_block_to_row_index_;
+  std::set<const double*> constant_parameter_blocks_;
+  std::unique_ptr<CompressedRowSparseMatrix> covariance_matrix_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_COVARIANCE_IMPL_H_
diff --git a/internal/ceres/covariance_test.cc b/internal/ceres/covariance_test.cc
new file mode 100644
index 0000000..dea0723
--- /dev/null
+++ b/internal/ceres/covariance_test.cc
@@ -0,0 +1,1279 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/covariance.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <cmath>
+#include <map>
+#include <memory>
+#include <utility>
+
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/cost_function.h"
+#include "ceres/covariance_impl.h"
+#include "ceres/local_parameterization.h"
+#include "ceres/map_util.h"
+#include "ceres/problem_impl.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+using std::make_pair;
+using std::map;
+using std::pair;
+using std::vector;
+
+class UnaryCostFunction: public CostFunction {
+ public:
+  UnaryCostFunction(const int num_residuals,
+                    const int32_t parameter_block_size,
+                    const double* jacobian)
+      : jacobian_(jacobian, jacobian + num_residuals * parameter_block_size) {
+    set_num_residuals(num_residuals);
+    mutable_parameter_block_sizes()->push_back(parameter_block_size);
+  }
+
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** jacobians) const {
+    for (int i = 0; i < num_residuals(); ++i) {
+      residuals[i] = 1;
+    }
+
+    if (jacobians == NULL) {
+      return true;
+    }
+
+    if (jacobians[0] != NULL) {
+      copy(jacobian_.begin(), jacobian_.end(), jacobians[0]);
+    }
+
+    return true;
+  }
+
+ private:
+  vector<double> jacobian_;
+};
+
+
+class BinaryCostFunction: public CostFunction {
+ public:
+  BinaryCostFunction(const int num_residuals,
+                     const int32_t parameter_block1_size,
+                     const int32_t parameter_block2_size,
+                     const double* jacobian1,
+                     const double* jacobian2)
+      : jacobian1_(jacobian1,
+                   jacobian1 + num_residuals * parameter_block1_size),
+        jacobian2_(jacobian2,
+                   jacobian2 + num_residuals * parameter_block2_size) {
+    set_num_residuals(num_residuals);
+    mutable_parameter_block_sizes()->push_back(parameter_block1_size);
+    mutable_parameter_block_sizes()->push_back(parameter_block2_size);
+  }
+
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** jacobians) const {
+    for (int i = 0; i < num_residuals(); ++i) {
+      residuals[i] = 2;
+    }
+
+    if (jacobians == NULL) {
+      return true;
+    }
+
+    if (jacobians[0] != NULL) {
+      copy(jacobian1_.begin(), jacobian1_.end(), jacobians[0]);
+    }
+
+    if (jacobians[1] != NULL) {
+      copy(jacobian2_.begin(), jacobian2_.end(), jacobians[1]);
+    }
+
+    return true;
+  }
+
+ private:
+  vector<double> jacobian1_;
+  vector<double> jacobian2_;
+};
+
+// x_plus_delta = delta * x;
+class PolynomialParameterization : public LocalParameterization {
+ public:
+  virtual ~PolynomialParameterization() {}
+
+  virtual bool Plus(const double* x,
+                    const double* delta,
+                    double* x_plus_delta) const {
+    x_plus_delta[0] = delta[0] * x[0];
+    x_plus_delta[1] = delta[0] * x[1];
+    return true;
+  }
+
+  virtual bool ComputeJacobian(const double* x, double* jacobian) const {
+    jacobian[0] = x[0];
+    jacobian[1] = x[1];
+    return true;
+  }
+
+  virtual int GlobalSize() const { return 2; }
+  virtual int LocalSize() const { return 1; }
+};
+
+TEST(CovarianceImpl, ComputeCovarianceSparsity) {
+  double parameters[10];
+
+  double* block1 = parameters;
+  double* block2 = block1 + 1;
+  double* block3 = block2 + 2;
+  double* block4 = block3 + 3;
+
+  ProblemImpl problem;
+
+  // Add in random order
+  Vector junk_jacobian = Vector::Zero(10);
+  problem.AddResidualBlock(
+      new UnaryCostFunction(1, 1, junk_jacobian.data()), NULL, block1);
+  problem.AddResidualBlock(
+      new UnaryCostFunction(1, 4, junk_jacobian.data()), NULL, block4);
+  problem.AddResidualBlock(
+      new UnaryCostFunction(1, 3, junk_jacobian.data()), NULL, block3);
+  problem.AddResidualBlock(
+      new UnaryCostFunction(1, 2, junk_jacobian.data()), NULL, block2);
+
+  // Sparsity pattern
+  //
+  // Note that the problem structure does not imply this sparsity
+  // pattern since all the residual blocks are unary. But the
+  // ComputeCovarianceSparsity function in its current incarnation
+  // does not pay attention to this fact and only looks at the
+  // parameter block pairs that the user provides.
+  //
+  //  X . . . . . X X X X
+  //  . X X X X X . . . .
+  //  . X X X X X . . . .
+  //  . . . X X X . . . .
+  //  . . . X X X . . . .
+  //  . . . X X X . . . .
+  //  . . . . . . X X X X
+  //  . . . . . . X X X X
+  //  . . . . . . X X X X
+  //  . . . . . . X X X X
+
+  int expected_rows[] = {0, 5, 10, 15, 18, 21, 24, 28, 32, 36, 40};
+  int expected_cols[] = {0, 6, 7, 8, 9,
+                         1, 2, 3, 4, 5,
+                         1, 2, 3, 4, 5,
+                         3, 4, 5,
+                         3, 4, 5,
+                         3, 4, 5,
+                         6, 7, 8, 9,
+                         6, 7, 8, 9,
+                         6, 7, 8, 9,
+                         6, 7, 8, 9};
+
+
+  vector<pair<const double*, const double*>> covariance_blocks;
+  covariance_blocks.push_back(make_pair(block1, block1));
+  covariance_blocks.push_back(make_pair(block4, block4));
+  covariance_blocks.push_back(make_pair(block2, block2));
+  covariance_blocks.push_back(make_pair(block3, block3));
+  covariance_blocks.push_back(make_pair(block2, block3));
+  covariance_blocks.push_back(make_pair(block4, block1));  // reversed
+
+  Covariance::Options options;
+  CovarianceImpl covariance_impl(options);
+  EXPECT_TRUE(covariance_impl
+              .ComputeCovarianceSparsity(covariance_blocks, &problem));
+
+  const CompressedRowSparseMatrix* crsm = covariance_impl.covariance_matrix();
+
+  EXPECT_EQ(crsm->num_rows(), 10);
+  EXPECT_EQ(crsm->num_cols(), 10);
+  EXPECT_EQ(crsm->num_nonzeros(), 40);
+
+  const int* rows = crsm->rows();
+  for (int r = 0; r < crsm->num_rows() + 1; ++r) {
+    EXPECT_EQ(rows[r], expected_rows[r])
+        << r << " "
+        << rows[r] << " "
+        << expected_rows[r];
+  }
+
+  const int* cols = crsm->cols();
+  for (int c = 0; c < crsm->num_nonzeros(); ++c) {
+    EXPECT_EQ(cols[c], expected_cols[c])
+        << c << " "
+        << cols[c] << " "
+        << expected_cols[c];
+  }
+}
+
+TEST(CovarianceImpl, ComputeCovarianceSparsityWithConstantParameterBlock) {
+  double parameters[10];
+
+  double* block1 = parameters;
+  double* block2 = block1 + 1;
+  double* block3 = block2 + 2;
+  double* block4 = block3 + 3;
+
+  ProblemImpl problem;
+
+  // Add in random order
+  Vector junk_jacobian = Vector::Zero(10);
+  problem.AddResidualBlock(
+      new UnaryCostFunction(1, 1, junk_jacobian.data()), NULL, block1);
+  problem.AddResidualBlock(
+      new UnaryCostFunction(1, 4, junk_jacobian.data()), NULL, block4);
+  problem.AddResidualBlock(
+      new UnaryCostFunction(1, 3, junk_jacobian.data()), NULL, block3);
+  problem.AddResidualBlock(
+      new UnaryCostFunction(1, 2, junk_jacobian.data()), NULL, block2);
+  problem.SetParameterBlockConstant(block3);
+
+  // Sparsity pattern
+  //
+  // Note that the problem structure does not imply this sparsity
+  // pattern since all the residual blocks are unary. But the
+  // ComputeCovarianceSparsity function in its current incarnation
+  // does not pay attention to this fact and only looks at the
+  // parameter block pairs that the user provides.
+  //
+  //  X . . X X X X
+  //  . X X . . . .
+  //  . X X . . . .
+  //  . . . X X X X
+  //  . . . X X X X
+  //  . . . X X X X
+  //  . . . X X X X
+
+  int expected_rows[] = {0, 5, 7, 9, 13, 17, 21, 25};
+  int expected_cols[] = {0, 3, 4, 5, 6,
+                         1, 2,
+                         1, 2,
+                         3, 4, 5, 6,
+                         3, 4, 5, 6,
+                         3, 4, 5, 6,
+                         3, 4, 5, 6};
+
+  vector<pair<const double*, const double*>> covariance_blocks;
+  covariance_blocks.push_back(make_pair(block1, block1));
+  covariance_blocks.push_back(make_pair(block4, block4));
+  covariance_blocks.push_back(make_pair(block2, block2));
+  covariance_blocks.push_back(make_pair(block3, block3));
+  covariance_blocks.push_back(make_pair(block2, block3));
+  covariance_blocks.push_back(make_pair(block4, block1));  // reversed
+
+  Covariance::Options options;
+  CovarianceImpl covariance_impl(options);
+  EXPECT_TRUE(covariance_impl
+              .ComputeCovarianceSparsity(covariance_blocks, &problem));
+
+  const CompressedRowSparseMatrix* crsm = covariance_impl.covariance_matrix();
+
+  EXPECT_EQ(crsm->num_rows(), 7);
+  EXPECT_EQ(crsm->num_cols(), 7);
+  EXPECT_EQ(crsm->num_nonzeros(), 25);
+
+  const int* rows = crsm->rows();
+  for (int r = 0; r < crsm->num_rows() + 1; ++r) {
+    EXPECT_EQ(rows[r], expected_rows[r])
+        << r << " "
+        << rows[r] << " "
+        << expected_rows[r];
+  }
+
+  const int* cols = crsm->cols();
+  for (int c = 0; c < crsm->num_nonzeros(); ++c) {
+    EXPECT_EQ(cols[c], expected_cols[c])
+        << c << " "
+        << cols[c] << " "
+        << expected_cols[c];
+  }
+}
+
+TEST(CovarianceImpl, ComputeCovarianceSparsityWithFreeParameterBlock) {
+  double parameters[10];
+
+  double* block1 = parameters;
+  double* block2 = block1 + 1;
+  double* block3 = block2 + 2;
+  double* block4 = block3 + 3;
+
+  ProblemImpl problem;
+
+  // Add in random order
+  Vector junk_jacobian = Vector::Zero(10);
+  problem.AddResidualBlock(
+      new UnaryCostFunction(1, 1, junk_jacobian.data()), NULL, block1);
+  problem.AddResidualBlock(
+      new UnaryCostFunction(1, 4, junk_jacobian.data()), NULL, block4);
+  problem.AddParameterBlock(block3, 3);
+  problem.AddResidualBlock(
+      new UnaryCostFunction(1, 2, junk_jacobian.data()), NULL, block2);
+
+  // Sparsity pattern
+  //
+  // Note that the problem structure does not imply this sparsity
+  // pattern since all the residual blocks are unary. But the
+  // ComputeCovarianceSparsity function in its current incarnation
+  // does not pay attention to this fact and only looks at the
+  // parameter block pairs that the user provides.
+  //
+  //  X . . X X X X
+  //  . X X . . . .
+  //  . X X . . . .
+  //  . . . X X X X
+  //  . . . X X X X
+  //  . . . X X X X
+  //  . . . X X X X
+
+  int expected_rows[] = {0, 5, 7, 9, 13, 17, 21, 25};
+  int expected_cols[] = {0, 3, 4, 5, 6,
+                         1, 2,
+                         1, 2,
+                         3, 4, 5, 6,
+                         3, 4, 5, 6,
+                         3, 4, 5, 6,
+                         3, 4, 5, 6};
+
+  vector<pair<const double*, const double*>> covariance_blocks;
+  covariance_blocks.push_back(make_pair(block1, block1));
+  covariance_blocks.push_back(make_pair(block4, block4));
+  covariance_blocks.push_back(make_pair(block2, block2));
+  covariance_blocks.push_back(make_pair(block3, block3));
+  covariance_blocks.push_back(make_pair(block2, block3));
+  covariance_blocks.push_back(make_pair(block4, block1));  // reversed
+
+  Covariance::Options options;
+  CovarianceImpl covariance_impl(options);
+  EXPECT_TRUE(covariance_impl
+              .ComputeCovarianceSparsity(covariance_blocks, &problem));
+
+  const CompressedRowSparseMatrix* crsm = covariance_impl.covariance_matrix();
+
+  EXPECT_EQ(crsm->num_rows(), 7);
+  EXPECT_EQ(crsm->num_cols(), 7);
+  EXPECT_EQ(crsm->num_nonzeros(), 25);
+
+  const int* rows = crsm->rows();
+  for (int r = 0; r < crsm->num_rows() + 1; ++r) {
+    EXPECT_EQ(rows[r], expected_rows[r])
+        << r << " "
+        << rows[r] << " "
+        << expected_rows[r];
+  }
+
+  const int* cols = crsm->cols();
+  for (int c = 0; c < crsm->num_nonzeros(); ++c) {
+    EXPECT_EQ(cols[c], expected_cols[c])
+        << c << " "
+        << cols[c] << " "
+        << expected_cols[c];
+  }
+}
+
+class CovarianceTest : public ::testing::Test {
+ protected:
+  typedef map<const double*, pair<int, int>> BoundsMap;
+
+  virtual void SetUp() {
+    double* x = parameters_;
+    double* y = x + 2;
+    double* z = y + 3;
+
+    x[0] = 1;
+    x[1] = 1;
+    y[0] = 2;
+    y[1] = 2;
+    y[2] = 2;
+    z[0] = 3;
+
+    {
+      double jacobian[] = { 1.0, 0.0, 0.0, 1.0};
+      problem_.AddResidualBlock(new UnaryCostFunction(2, 2, jacobian), NULL, x);
+    }
+
+    {
+      double jacobian[] = { 2.0, 0.0, 0.0, 0.0, 2.0, 0.0, 0.0, 0.0, 2.0 };
+      problem_.AddResidualBlock(new UnaryCostFunction(3, 3, jacobian), NULL, y);
+    }
+
+    {
+      double jacobian = 5.0;
+      problem_.AddResidualBlock(new UnaryCostFunction(1, 1, &jacobian),
+                                NULL,
+                                z);
+    }
+
+    {
+      double jacobian1[] = { 1.0, 2.0, 3.0 };
+      double jacobian2[] = { -5.0, -6.0 };
+      problem_.AddResidualBlock(
+          new BinaryCostFunction(1, 3, 2, jacobian1, jacobian2),
+          NULL,
+          y,
+          x);
+    }
+
+    {
+      double jacobian1[] = {2.0 };
+      double jacobian2[] = { 3.0, -2.0 };
+      problem_.AddResidualBlock(
+          new BinaryCostFunction(1, 1, 2, jacobian1, jacobian2),
+          NULL,
+          z,
+          x);
+    }
+
+    all_covariance_blocks_.push_back(make_pair(x, x));
+    all_covariance_blocks_.push_back(make_pair(y, y));
+    all_covariance_blocks_.push_back(make_pair(z, z));
+    all_covariance_blocks_.push_back(make_pair(x, y));
+    all_covariance_blocks_.push_back(make_pair(x, z));
+    all_covariance_blocks_.push_back(make_pair(y, z));
+
+    column_bounds_[x] = make_pair(0, 2);
+    column_bounds_[y] = make_pair(2, 5);
+    column_bounds_[z] = make_pair(5, 6);
+  }
+
+  // Computes covariance in ambient space.
+  void ComputeAndCompareCovarianceBlocks(const Covariance::Options& options,
+                                         const double* expected_covariance) {
+    ComputeAndCompareCovarianceBlocksInTangentOrAmbientSpace(
+        options,
+        true,  // ambient
+        expected_covariance);
+  }
+
+  // Computes covariance in tangent space.
+  void ComputeAndCompareCovarianceBlocksInTangentSpace(
+                                         const Covariance::Options& options,
+                                         const double* expected_covariance) {
+    ComputeAndCompareCovarianceBlocksInTangentOrAmbientSpace(
+        options,
+        false,  // tangent
+        expected_covariance);
+  }
+
+  void ComputeAndCompareCovarianceBlocksInTangentOrAmbientSpace(
+      const Covariance::Options& options,
+      bool lift_covariance_to_ambient_space,
+      const double* expected_covariance) {
+    // Generate all possible combinations of block pairs and check that
+    // the covariance computation is correct.
+    for (int i = 0; i <= 64; ++i) {
+      vector<pair<const double*, const double*>> covariance_blocks;
+      if (i & 1) {
+        covariance_blocks.push_back(all_covariance_blocks_[0]);
+      }
+
+      if (i & 2) {
+        covariance_blocks.push_back(all_covariance_blocks_[1]);
+      }
+
+      if (i & 4) {
+        covariance_blocks.push_back(all_covariance_blocks_[2]);
+      }
+
+      if (i & 8) {
+        covariance_blocks.push_back(all_covariance_blocks_[3]);
+      }
+
+      if (i & 16) {
+        covariance_blocks.push_back(all_covariance_blocks_[4]);
+      }
+
+      if (i & 32) {
+        covariance_blocks.push_back(all_covariance_blocks_[5]);
+      }
+
+      Covariance covariance(options);
+      EXPECT_TRUE(covariance.Compute(covariance_blocks, &problem_));
+
+      for (int i = 0; i < covariance_blocks.size(); ++i) {
+        const double* block1 = covariance_blocks[i].first;
+        const double* block2 = covariance_blocks[i].second;
+        // block1, block2
+        GetCovarianceBlockAndCompare(block1,
+                                     block2,
+                                     lift_covariance_to_ambient_space,
+                                     covariance,
+                                     expected_covariance);
+        // block2, block1
+        GetCovarianceBlockAndCompare(block2,
+                                     block1,
+                                     lift_covariance_to_ambient_space,
+                                     covariance,
+                                     expected_covariance);
+      }
+    }
+  }
+
+  void GetCovarianceBlockAndCompare(const double* block1,
+                                    const double* block2,
+                                    bool lift_covariance_to_ambient_space,
+                                    const Covariance& covariance,
+                                    const double* expected_covariance) {
+    const BoundsMap& column_bounds = lift_covariance_to_ambient_space ?
+        column_bounds_ : local_column_bounds_;
+    const int row_begin = FindOrDie(column_bounds, block1).first;
+    const int row_end = FindOrDie(column_bounds, block1).second;
+    const int col_begin = FindOrDie(column_bounds, block2).first;
+    const int col_end = FindOrDie(column_bounds, block2).second;
+
+    Matrix actual(row_end - row_begin, col_end - col_begin);
+    if (lift_covariance_to_ambient_space) {
+      EXPECT_TRUE(covariance.GetCovarianceBlock(block1,
+                                                block2,
+                                                actual.data()));
+    } else {
+      EXPECT_TRUE(covariance.GetCovarianceBlockInTangentSpace(block1,
+                                                              block2,
+                                                              actual.data()));
+    }
+
+    // Total covariance dimension: sum of Size()s (ambient) or
+    // LocalSize()s (tangent space).
+    int dof = 0;
+    for (const auto& bound : column_bounds) {
+      dof = std::max(dof, bound.second.second);
+    }
+    ConstMatrixRef expected(expected_covariance, dof, dof);
+    double diff_norm = (expected.block(row_begin,
+                                       col_begin,
+                                       row_end - row_begin,
+                                       col_end - col_begin) - actual).norm();
+    diff_norm /= (row_end - row_begin) * (col_end - col_begin);
+
+    const double kTolerance = 1e-5;
+    EXPECT_NEAR(diff_norm, 0.0, kTolerance)
+        << "rows: " << row_begin << " " << row_end << "  "
+        << "cols: " << col_begin << " " << col_end << "  "
+        << "\n\n expected: \n " << expected.block(row_begin,
+                                                  col_begin,
+                                                  row_end - row_begin,
+                                                  col_end - col_begin)
+        << "\n\n actual: \n " << actual
+        << "\n\n full expected: \n" << expected;
+  }
+
+  double parameters_[6];
+  Problem problem_;
+  vector<pair<const double*, const double*>> all_covariance_blocks_;
+  BoundsMap column_bounds_;
+  BoundsMap local_column_bounds_;
+};
+
+
+TEST_F(CovarianceTest, NormalBehavior) {
+  // J
+  //
+  //   1  0  0  0  0  0
+  //   0  1  0  0  0  0
+  //   0  0  2  0  0  0
+  //   0  0  0  2  0  0
+  //   0  0  0  0  2  0
+  //   0  0  0  0  0  5
+  //  -5 -6  1  2  3  0
+  //   3 -2  0  0  0  2
+
+  // J'J
+  //
+  //   35  24 -5 -10 -15  6
+  //   24  41 -6 -12 -18 -4
+  //   -5  -6  5   2   3  0
+  //  -10 -12  2   8   6  0
+  //  -15 -18  3   6  13  0
+  //    6  -4  0   0   0 29
+
+  // inv(J'J) computed using octave.
+  double expected_covariance[] = {
+     7.0747e-02,  -8.4923e-03,   1.6821e-02,   3.3643e-02,   5.0464e-02,  -1.5809e-02,  // NOLINT
+    -8.4923e-03,   8.1352e-02,   2.4758e-02,   4.9517e-02,   7.4275e-02,   1.2978e-02,  // NOLINT
+     1.6821e-02,   2.4758e-02,   2.4904e-01,  -1.9271e-03,  -2.8906e-03,  -6.5325e-05,  // NOLINT
+     3.3643e-02,   4.9517e-02,  -1.9271e-03,   2.4615e-01,  -5.7813e-03,  -1.3065e-04,  // NOLINT
+     5.0464e-02,   7.4275e-02,  -2.8906e-03,  -5.7813e-03,   2.4133e-01,  -1.9598e-04,  // NOLINT
+    -1.5809e-02,   1.2978e-02,  -6.5325e-05,  -1.3065e-04,  -1.9598e-04,   3.9544e-02,  // NOLINT
+  };
+
+  Covariance::Options options;
+
+#ifndef CERES_NO_SUITESPARSE
+  options.algorithm_type = SPARSE_QR;
+  options.sparse_linear_algebra_library_type = SUITE_SPARSE;
+  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
+#endif
+
+  options.algorithm_type = DENSE_SVD;
+  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
+
+  options.algorithm_type = SPARSE_QR;
+  options.sparse_linear_algebra_library_type = EIGEN_SPARSE;
+  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
+}
+
+#ifdef CERES_USE_OPENMP
+
+TEST_F(CovarianceTest, ThreadedNormalBehavior) {
+  // J
+  //
+  //   1  0  0  0  0  0
+  //   0  1  0  0  0  0
+  //   0  0  2  0  0  0
+  //   0  0  0  2  0  0
+  //   0  0  0  0  2  0
+  //   0  0  0  0  0  5
+  //  -5 -6  1  2  3  0
+  //   3 -2  0  0  0  2
+
+  // J'J
+  //
+  //   35  24 -5 -10 -15  6
+  //   24  41 -6 -12 -18 -4
+  //   -5  -6  5   2   3  0
+  //  -10 -12  2   8   6  0
+  //  -15 -18  3   6  13  0
+  //    6  -4  0   0   0 29
+
+  // inv(J'J) computed using octave.
+  double expected_covariance[] = {
+     7.0747e-02,  -8.4923e-03,   1.6821e-02,   3.3643e-02,   5.0464e-02,  -1.5809e-02,  // NOLINT
+    -8.4923e-03,   8.1352e-02,   2.4758e-02,   4.9517e-02,   7.4275e-02,   1.2978e-02,  // NOLINT
+     1.6821e-02,   2.4758e-02,   2.4904e-01,  -1.9271e-03,  -2.8906e-03,  -6.5325e-05,  // NOLINT
+     3.3643e-02,   4.9517e-02,  -1.9271e-03,   2.4615e-01,  -5.7813e-03,  -1.3065e-04,  // NOLINT
+     5.0464e-02,   7.4275e-02,  -2.8906e-03,  -5.7813e-03,   2.4133e-01,  -1.9598e-04,  // NOLINT
+    -1.5809e-02,   1.2978e-02,  -6.5325e-05,  -1.3065e-04,  -1.9598e-04,   3.9544e-02,  // NOLINT
+  };
+
+  Covariance::Options options;
+  options.num_threads = 4;
+
+#ifndef CERES_NO_SUITESPARSE
+  options.algorithm_type = SPARSE_QR;
+  options.sparse_linear_algebra_library_type = SUITE_SPARSE;
+  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
+#endif
+
+  options.algorithm_type = DENSE_SVD;
+  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
+
+  options.algorithm_type = SPARSE_QR;
+  options.sparse_linear_algebra_library_type = EIGEN_SPARSE;
+  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
+}
+
+#endif  // CERES_USE_OPENMP
+
+TEST_F(CovarianceTest, ConstantParameterBlock) {
+  problem_.SetParameterBlockConstant(parameters_);
+
+  // J
+  //
+  //  0  0  0  0  0  0
+  //  0  0  0  0  0  0
+  //  0  0  2  0  0  0
+  //  0  0  0  2  0  0
+  //  0  0  0  0  2  0
+  //  0  0  0  0  0  5
+  //  0  0  1  2  3  0
+  //  0  0  0  0  0  2
+
+  // J'J
+  //
+  //  0  0  0  0  0  0
+  //  0  0  0  0  0  0
+  //  0  0  5  2  3  0
+  //  0  0  2  8  6  0
+  //  0  0  3  6 13  0
+  //  0  0  0  0  0 29
+
+  // pinv(J'J) computed using octave.
+  double expected_covariance[] = {
+              0,            0,            0,            0,            0,            0,  // NOLINT
+              0,            0,            0,            0,            0,            0,  // NOLINT
+              0,            0,      0.23611,     -0.02778,     -0.04167,     -0.00000,  // NOLINT
+              0,            0,     -0.02778,      0.19444,     -0.08333,     -0.00000,  // NOLINT
+              0,            0,     -0.04167,     -0.08333,      0.12500,     -0.00000,  // NOLINT
+              0,            0,     -0.00000,     -0.00000,     -0.00000,      0.03448   // NOLINT
+  };
+
+  Covariance::Options options;
+
+#ifndef CERES_NO_SUITESPARSE
+  options.algorithm_type = SPARSE_QR;
+  options.sparse_linear_algebra_library_type = SUITE_SPARSE;
+  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
+#endif
+
+  options.algorithm_type = DENSE_SVD;
+  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
+
+  options.algorithm_type = SPARSE_QR;
+  options.sparse_linear_algebra_library_type = EIGEN_SPARSE;
+  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
+}
+
+TEST_F(CovarianceTest, LocalParameterization) {
+  double* x = parameters_;
+  double* y = x + 2;
+
+  problem_.SetParameterization(x, new PolynomialParameterization);
+
+  vector<int> subset;
+  subset.push_back(2);
+  problem_.SetParameterization(y, new SubsetParameterization(3, subset));
+
+  // Raw Jacobian: J
+  //
+  //   1   0  0  0  0  0
+  //   0   1  0  0  0  0
+  //   0   0  2  0  0  0
+  //   0   0  0  2  0  0
+  //   0   0  0  0  2  0
+  //   0   0  0  0  0  5
+  //  -5  -6  1  2  3  0
+  //   3  -2  0  0  0  2
+
+  // Local to global jacobian: A
+  //
+  //  1   0   0   0
+  //  1   0   0   0
+  //  0   1   0   0
+  //  0   0   1   0
+  //  0   0   0   0
+  //  0   0   0   1
+
+  // A * inv((J*A)'*(J*A)) * A'
+  // Computed using octave.
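+  //
+  // That is, the covariance inv((J*A)'*(J*A)) is computed in the
+  // 4-dimensional tangent (local) space and then lifted back to the
+  // 6-dimensional ambient space by pre- and post-multiplying with A and A'.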
+  double expected_covariance[] = {
+    0.01766,   0.01766,   0.02158,   0.04316,   0.00000,  -0.00122,
+    0.01766,   0.01766,   0.02158,   0.04316,   0.00000,  -0.00122,
+    0.02158,   0.02158,   0.24860,  -0.00281,   0.00000,  -0.00149,
+    0.04316,   0.04316,  -0.00281,   0.24439,   0.00000,  -0.00298,
+    0.00000,   0.00000,   0.00000,   0.00000,   0.00000,   0.00000,
+   -0.00122,  -0.00122,  -0.00149,  -0.00298,   0.00000,   0.03457
+  };
+
+  Covariance::Options options;
+
+#ifndef CERES_NO_SUITESPARSE
+  options.algorithm_type = SPARSE_QR;
+  options.sparse_linear_algebra_library_type = SUITE_SPARSE;
+  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
+#endif
+
+  options.algorithm_type = DENSE_SVD;
+  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
+
+  options.algorithm_type = SPARSE_QR;
+  options.sparse_linear_algebra_library_type = EIGEN_SPARSE;
+  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
+}
+
+TEST_F(CovarianceTest, LocalParameterizationInTangentSpace) {
+  double* x = parameters_;
+  double* y = x + 2;
+  double* z = y + 3;
+
+  problem_.SetParameterization(x, new PolynomialParameterization);
+
+  vector<int> subset;
+  subset.push_back(2);
+  problem_.SetParameterization(y, new SubsetParameterization(3, subset));
+
+  local_column_bounds_[x] = make_pair(0, 1);
+  local_column_bounds_[y] = make_pair(1, 3);
+  local_column_bounds_[z] = make_pair(3, 4);
+
+  // Raw Jacobian: J
+  //
+  //   1   0  0  0  0  0
+  //   0   1  0  0  0  0
+  //   0   0  2  0  0  0
+  //   0   0  0  2  0  0
+  //   0   0  0  0  2  0
+  //   0   0  0  0  0  5
+  //  -5  -6  1  2  3  0
+  //   3  -2  0  0  0  2
+
+  // Local to global jacobian: A
+  //
+  //  1   0   0   0
+  //  1   0   0   0
+  //  0   1   0   0
+  //  0   0   1   0
+  //  0   0   0   0
+  //  0   0   0   1
+
+  // inv((J*A)'*(J*A))
+  // Computed using octave.
+  double expected_covariance[] = {
+    0.01766,   0.02158,   0.04316,   -0.00122,
+    0.02158,   0.24860,  -0.00281,   -0.00149,
+    0.04316,  -0.00281,   0.24439,   -0.00298,
+   -0.00122,  -0.00149,  -0.00298,    0.03457  // NOLINT
+  };
+
+  Covariance::Options options;
+
+#ifndef CERES_NO_SUITESPARSE
+  options.algorithm_type = SPARSE_QR;
+  options.sparse_linear_algebra_library_type = SUITE_SPARSE;
+
+  ComputeAndCompareCovarianceBlocksInTangentSpace(options, expected_covariance);
+#endif
+
+  options.algorithm_type = DENSE_SVD;
+  ComputeAndCompareCovarianceBlocksInTangentSpace(options, expected_covariance);
+
+  options.algorithm_type = SPARSE_QR;
+  options.sparse_linear_algebra_library_type = EIGEN_SPARSE;
+  ComputeAndCompareCovarianceBlocksInTangentSpace(options, expected_covariance);
+}
+
+TEST_F(CovarianceTest, LocalParameterizationInTangentSpaceWithConstantBlocks) {
+  double* x = parameters_;
+  double* y = x + 2;
+  double* z = y + 3;
+
+  problem_.SetParameterization(x, new PolynomialParameterization);
+  problem_.SetParameterBlockConstant(x);
+
+  vector<int> subset;
+  subset.push_back(2);
+  problem_.SetParameterization(y, new SubsetParameterization(3, subset));
+  problem_.SetParameterBlockConstant(y);
+
+  local_column_bounds_[x] = make_pair(0, 1);
+  local_column_bounds_[y] = make_pair(1, 3);
+  local_column_bounds_[z] = make_pair(3, 4);
+
+  // Raw Jacobian: J
+  //
+  //   1   0  0  0  0  0
+  //   0   1  0  0  0  0
+  //   0   0  2  0  0  0
+  //   0   0  0  2  0  0
+  //   0   0  0  0  2  0
+  //   0   0  0  0  0  5
+  //  -5  -6  1  2  3  0
+  //   3  -2  0  0  0  2
+
+  // Local to global jacobian: A
+  //
+  //  0   0   0   0
+  //  0   0   0   0
+  //  0   0   0   0
+  //  0   0   0   0
+  //  0   0   0   0
+  //  0   0   0   1
+
+  // pinv((J*A)'*(J*A))
+  // Computed using octave.
+  double expected_covariance[] = {
+    0.0, 0.0, 0.0, 0.0,
+    0.0, 0.0, 0.0, 0.0,
+    0.0, 0.0, 0.0, 0.0,
+    0.0, 0.0, 0.0, 0.034482 // NOLINT
+  };
+
+  Covariance::Options options;
+
+#ifndef CERES_NO_SUITESPARSE
+  options.algorithm_type = SPARSE_QR;
+  options.sparse_linear_algebra_library_type = SUITE_SPARSE;
+
+  ComputeAndCompareCovarianceBlocksInTangentSpace(options, expected_covariance);
+#endif
+
+  options.algorithm_type = DENSE_SVD;
+  ComputeAndCompareCovarianceBlocksInTangentSpace(options, expected_covariance);
+
+  options.algorithm_type = SPARSE_QR;
+  options.sparse_linear_algebra_library_type = EIGEN_SPARSE;
+  ComputeAndCompareCovarianceBlocksInTangentSpace(options, expected_covariance);
+}
+
+TEST_F(CovarianceTest, TruncatedRank) {
+  // J
+  //
+  //   1  0  0  0  0  0
+  //   0  1  0  0  0  0
+  //   0  0  2  0  0  0
+  //   0  0  0  2  0  0
+  //   0  0  0  0  2  0
+  //   0  0  0  0  0  5
+  //  -5 -6  1  2  3  0
+  //   3 -2  0  0  0  2
+
+  // J'J
+  //
+  //   35  24 -5 -10 -15  6
+  //   24  41 -6 -12 -18 -4
+  //   -5  -6  5   2   3  0
+  //  -10 -12  2   8   6  0
+  //  -15 -18  3   6  13  0
+  //    6  -4  0   0   0 29
+
+  // 3.4142 is the smallest eigenvalue of J'J. The following matrix
+  // was obtained by dropping the eigenvector corresponding to this
+  // eigenvalue.
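+  //
+  // Equivalently, writing J'J = sum_i lambda_i * v_i * v_i', the matrix
+  // below is the truncated inverse sum_i (1 / lambda_i) * v_i * v_i' taken
+  // over the five largest eigenvalues only.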
+  double expected_covariance[] = {
+     5.4135e-02,  -3.5121e-02,   1.7257e-04,   3.4514e-04,   5.1771e-04,  -1.6076e-02,  // NOLINT
+    -3.5121e-02,   3.8667e-02,  -1.9288e-03,  -3.8576e-03,  -5.7864e-03,   1.2549e-02,  // NOLINT
+     1.7257e-04,  -1.9288e-03,   2.3235e-01,  -3.5297e-02,  -5.2946e-02,  -3.3329e-04,  // NOLINT
+     3.4514e-04,  -3.8576e-03,  -3.5297e-02,   1.7941e-01,  -1.0589e-01,  -6.6659e-04,  // NOLINT
+     5.1771e-04,  -5.7864e-03,  -5.2946e-02,  -1.0589e-01,   9.1162e-02,  -9.9988e-04,  // NOLINT
+    -1.6076e-02,   1.2549e-02,  -3.3329e-04,  -6.6659e-04,  -9.9988e-04,   3.9539e-02   // NOLINT
+  };
+
+
+  {
+    Covariance::Options options;
+    options.algorithm_type = DENSE_SVD;
+    // Force dropping of the smallest eigenvector.
+    options.null_space_rank = 1;
+    ComputeAndCompareCovarianceBlocks(options, expected_covariance);
+  }
+
+  {
+    Covariance::Options options;
+    options.algorithm_type = DENSE_SVD;
+    // Force dropping of the smallest eigenvector by choosing a
+    // reciprocal condition number threshold that excludes it, and
+    // letting the truncation happen automatically (null_space_rank = -1).
+    options.min_reciprocal_condition_number = 0.044494;
+    options.null_space_rank = -1;
+    ComputeAndCompareCovarianceBlocks(options, expected_covariance);
+  }
+}
+
+TEST_F(CovarianceTest, DenseCovarianceMatrixFromSetOfParameters) {
+  Covariance::Options options;
+  Covariance covariance(options);
+  double* x = parameters_;
+  double* y = x + 2;
+  double* z = y + 3;
+  vector<const double*> parameter_blocks;
+  parameter_blocks.push_back(x);
+  parameter_blocks.push_back(y);
+  parameter_blocks.push_back(z);
+  covariance.Compute(parameter_blocks, &problem_);
+  double expected_covariance[36];
+  covariance.GetCovarianceMatrix(parameter_blocks, expected_covariance);
+
+#ifndef CERES_NO_SUITESPARSE
+  options.algorithm_type = SPARSE_QR;
+  options.sparse_linear_algebra_library_type = SUITE_SPARSE;
+  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
+#endif
+
+  options.algorithm_type = DENSE_SVD;
+  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
+
+  options.algorithm_type = SPARSE_QR;
+  options.sparse_linear_algebra_library_type = EIGEN_SPARSE;
+  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
+}
+
+TEST_F(CovarianceTest, DenseCovarianceMatrixFromSetOfParametersThreaded) {
+  Covariance::Options options;
+  options.num_threads = 4;
+  Covariance covariance(options);
+  double* x = parameters_;
+  double* y = x + 2;
+  double* z = y + 3;
+  vector<const double*> parameter_blocks;
+  parameter_blocks.push_back(x);
+  parameter_blocks.push_back(y);
+  parameter_blocks.push_back(z);
+  covariance.Compute(parameter_blocks, &problem_);
+  double expected_covariance[36];
+  covariance.GetCovarianceMatrix(parameter_blocks, expected_covariance);
+
+#ifndef CERES_NO_SUITESPARSE
+  options.algorithm_type = SPARSE_QR;
+  options.sparse_linear_algebra_library_type = SUITE_SPARSE;
+  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
+#endif
+
+  options.algorithm_type = DENSE_SVD;
+  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
+
+  options.algorithm_type = SPARSE_QR;
+  options.sparse_linear_algebra_library_type = EIGEN_SPARSE;
+  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
+}
+
+TEST_F(CovarianceTest, DenseCovarianceMatrixFromSetOfParametersInTangentSpace) {
+  Covariance::Options options;
+  Covariance covariance(options);
+  double* x = parameters_;
+  double* y = x + 2;
+  double* z = y + 3;
+
+  problem_.SetParameterization(x, new PolynomialParameterization);
+
+  vector<int> subset;
+  subset.push_back(2);
+  problem_.SetParameterization(y, new SubsetParameterization(3, subset));
+
+  local_column_bounds_[x] = make_pair(0, 1);
+  local_column_bounds_[y] = make_pair(1, 3);
+  local_column_bounds_[z] = make_pair(3, 4);
+
+  vector<const double*> parameter_blocks;
+  parameter_blocks.push_back(x);
+  parameter_blocks.push_back(y);
+  parameter_blocks.push_back(z);
+  covariance.Compute(parameter_blocks, &problem_);
+  double expected_covariance[16];
+  covariance.GetCovarianceMatrixInTangentSpace(parameter_blocks,
+                                               expected_covariance);
+
+#ifndef CERES_NO_SUITESPARSE
+  options.algorithm_type = SPARSE_QR;
+  options.sparse_linear_algebra_library_type = SUITE_SPARSE;
+
+  ComputeAndCompareCovarianceBlocksInTangentSpace(options, expected_covariance);
+#endif
+
+  options.algorithm_type = DENSE_SVD;
+  ComputeAndCompareCovarianceBlocksInTangentSpace(options, expected_covariance);
+
+  options.algorithm_type = SPARSE_QR;
+  options.sparse_linear_algebra_library_type = EIGEN_SPARSE;
+  ComputeAndCompareCovarianceBlocksInTangentSpace(options, expected_covariance);
+}
+
+TEST_F(CovarianceTest, ComputeCovarianceFailure) {
+  Covariance::Options options;
+  Covariance covariance(options);
+  double* x = parameters_;
+  double* y = x + 2;
+  vector<const double*> parameter_blocks;
+  parameter_blocks.push_back(x);
+  parameter_blocks.push_back(x);
+  parameter_blocks.push_back(y);
+  parameter_blocks.push_back(y);
+  EXPECT_DEATH_IF_SUPPORTED(covariance.Compute(parameter_blocks, &problem_),
+                            "Covariance::Compute called with duplicate blocks "
+                            "at indices \\(0, 1\\) and \\(2, 3\\)");
+  vector<pair<const double*, const double*>> covariance_blocks;
+  covariance_blocks.push_back(make_pair(x, x));
+  covariance_blocks.push_back(make_pair(x, x));
+  covariance_blocks.push_back(make_pair(y, y));
+  covariance_blocks.push_back(make_pair(y, y));
+  EXPECT_DEATH_IF_SUPPORTED(covariance.Compute(covariance_blocks, &problem_),
+                            "Covariance::Compute called with duplicate blocks "
+                            "at indices \\(0, 1\\) and \\(2, 3\\)");
+}
+
+class RankDeficientCovarianceTest : public CovarianceTest {
+ protected:
+  virtual void SetUp() {
+    double* x = parameters_;
+    double* y = x + 2;
+    double* z = y + 3;
+
+    {
+      double jacobian[] = { 1.0, 0.0, 0.0, 1.0 };
+      problem_.AddResidualBlock(new UnaryCostFunction(2, 2, jacobian), NULL, x);
+    }
+
+    {
+      double jacobian[] = { 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
+      problem_.AddResidualBlock(new UnaryCostFunction(3, 3, jacobian), NULL, y);
+    }
+
+    {
+      double jacobian = 5.0;
+      problem_.AddResidualBlock(new UnaryCostFunction(1, 1, &jacobian),
+                                NULL,
+                                z);
+    }
+
+    {
+      double jacobian1[] = { 0.0, 0.0, 0.0 };
+      double jacobian2[] = { -5.0, -6.0 };
+      problem_.AddResidualBlock(
+          new BinaryCostFunction(1, 3, 2, jacobian1, jacobian2),
+          NULL,
+          y,
+          x);
+    }
+
+    {
+      double jacobian1[] = { 2.0 };
+      double jacobian2[] = { 3.0, -2.0 };
+      problem_.AddResidualBlock(
+          new BinaryCostFunction(1, 1, 2, jacobian1, jacobian2),
+          NULL,
+          z,
+          x);
+    }
+
+    all_covariance_blocks_.push_back(make_pair(x, x));
+    all_covariance_blocks_.push_back(make_pair(y, y));
+    all_covariance_blocks_.push_back(make_pair(z, z));
+    all_covariance_blocks_.push_back(make_pair(x, y));
+    all_covariance_blocks_.push_back(make_pair(x, z));
+    all_covariance_blocks_.push_back(make_pair(y, z));
+
+    column_bounds_[x] = make_pair(0, 2);
+    column_bounds_[y] = make_pair(2, 5);
+    column_bounds_[z] = make_pair(5, 6);
+  }
+};
+
+TEST_F(RankDeficientCovarianceTest, AutomaticTruncation) {
+  // J
+  //
+  //   1  0  0  0  0  0
+  //   0  1  0  0  0  0
+  //   0  0  0  0  0  0
+  //   0  0  0  0  0  0
+  //   0  0  0  0  0  0
+  //   0  0  0  0  0  5
+  //  -5 -6  0  0  0  0
+  //   3 -2  0  0  0  2
+
+  // J'J
+  //
+  //  35 24  0  0  0  6
+  //  24 41  0  0  0 -4
+  //   0  0  0  0  0  0
+  //   0  0  0  0  0  0
+  //   0  0  0  0  0  0
+  //   6 -4  0  0  0 29
+
+  // pinv(J'J) computed using octave.
+  double expected_covariance[] = {
+     0.053998,  -0.033145,   0.000000,   0.000000,   0.000000,  -0.015744,
+    -0.033145,   0.045067,   0.000000,   0.000000,   0.000000,   0.013074,
+     0.000000,   0.000000,   0.000000,   0.000000,   0.000000,   0.000000,
+     0.000000,   0.000000,   0.000000,   0.000000,   0.000000,   0.000000,
+     0.000000,   0.000000,   0.000000,   0.000000,   0.000000,   0.000000,
+    -0.015744,   0.013074,   0.000000,   0.000000,   0.000000,   0.039543
+  };
+
+  Covariance::Options options;
+  options.algorithm_type = DENSE_SVD;
+  options.null_space_rank = -1;
+  ComputeAndCompareCovarianceBlocks(options, expected_covariance);
+}
+
+class LargeScaleCovarianceTest : public ::testing::Test {
+ protected:
+  virtual void SetUp() {
+    num_parameter_blocks_ = 2000;
+    parameter_block_size_ = 5;
+    parameters_.reset(
+        new double[parameter_block_size_ * num_parameter_blocks_]);
+
+    Matrix jacobian(parameter_block_size_, parameter_block_size_);
+    for (int i = 0; i < num_parameter_blocks_; ++i) {
+      jacobian.setIdentity();
+      jacobian *= (i + 1);
+
+      double* block_i = parameters_.get() + i * parameter_block_size_;
+      problem_.AddResidualBlock(new UnaryCostFunction(parameter_block_size_,
+                                                      parameter_block_size_,
+                                                      jacobian.data()),
+                                NULL,
+                                block_i);
+      for (int j = i; j < num_parameter_blocks_; ++j) {
+        double* block_j = parameters_.get() + j * parameter_block_size_;
+        all_covariance_blocks_.push_back(make_pair(block_i, block_j));
+      }
+    }
+  }
+
+  void ComputeAndCompare(
+      CovarianceAlgorithmType algorithm_type,
+      SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
+      int num_threads) {
+    Covariance::Options options;
+    options.algorithm_type = algorithm_type;
+    options.sparse_linear_algebra_library_type =
+        sparse_linear_algebra_library_type;
+    options.num_threads = num_threads;
+    Covariance covariance(options);
+    EXPECT_TRUE(covariance.Compute(all_covariance_blocks_, &problem_));
+
+    Matrix expected(parameter_block_size_, parameter_block_size_);
+    Matrix actual(parameter_block_size_, parameter_block_size_);
+    const double kTolerance = 1e-16;
+
+    for (int i = 0; i < num_parameter_blocks_; ++i) {
+      expected.setIdentity();
+      expected /= (i + 1.0) * (i + 1.0);
+
+      double* block_i = parameters_.get() + i * parameter_block_size_;
+      covariance.GetCovarianceBlock(block_i, block_i, actual.data());
+      EXPECT_NEAR((expected - actual).norm(), 0.0, kTolerance)
+          << "block: " << i << ", " << i << "\n"
+          << "expected: \n" << expected << "\n"
+          << "actual: \n" << actual;
+
+      expected.setZero();
+      for (int j = i + 1; j < num_parameter_blocks_; ++j) {
+        double* block_j = parameters_.get() + j * parameter_block_size_;
+        covariance.GetCovarianceBlock(block_i, block_j, actual.data());
+        EXPECT_NEAR((expected - actual).norm(), 0.0, kTolerance)
+            << "block: " << i << ", " << j << "\n"
+            << "expected: \n" << expected << "\n"
+            << "actual: \n" << actual;
+      }
+    }
+  }
+
+  std::unique_ptr<double[]> parameters_;
+  int parameter_block_size_;
+  int num_parameter_blocks_;
+
+  Problem problem_;
+  vector<pair<const double*, const double*>> all_covariance_blocks_;
+};
+
+#if !defined(CERES_NO_SUITESPARSE) && defined(CERES_USE_OPENMP)
+
+TEST_F(LargeScaleCovarianceTest, Parallel) {
+  ComputeAndCompare(SPARSE_QR, SUITE_SPARSE, 4);
+}
+
+#endif  // !defined(CERES_NO_SUITESPARSE) && defined(CERES_USE_OPENMP)
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/cubic_interpolation_test.cc b/internal/ceres/cubic_interpolation_test.cc
new file mode 100644
index 0000000..d68af22
--- /dev/null
+++ b/internal/ceres/cubic_interpolation_test.cc
@@ -0,0 +1,510 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/cubic_interpolation.h"
+
+#include <memory>
+#include "ceres/jet.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+static const double kTolerance = 1e-12;
+
+TEST(Grid1D, OneDataDimension) {
+  int x[] = {1, 2, 3};
+  Grid1D<int, 1> grid(x, 0, 3);
+  for (int i = 0; i < 3; ++i) {
+    double value;
+    grid.GetValue(i, &value);
+    EXPECT_EQ(value, static_cast<double>(i + 1));
+  }
+}
+
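+// Out-of-range indices are expected to clamp to the nearest valid sample, so
+// reads past either end return the first or last entry of x.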
+TEST(Grid1D, OneDataDimensionOutOfBounds) {
+  int x[] = {1, 2, 3};
+  Grid1D<int, 1> grid(x, 0, 3);
+  double value;
+  grid.GetValue(-1, &value);
+  EXPECT_EQ(value, x[0]);
+  grid.GetValue(-2, &value);
+  EXPECT_EQ(value, x[0]);
+  grid.GetValue(3, &value);
+  EXPECT_EQ(value, x[2]);
+  grid.GetValue(4, &value);
+  EXPECT_EQ(value, x[2]);
+}
+
+TEST(Grid1D, TwoDataDimensionIntegerDataInterleaved) {
+  int x[] = {1, 5,
+             2, 6,
+             3, 7};
+
+  Grid1D<int, 2, true> grid(x, 0, 3);
+  for (int i = 0; i < 3; ++i) {
+    double value[2];
+    grid.GetValue(i, value);
+    EXPECT_EQ(value[0], static_cast<double>(i + 1));
+    EXPECT_EQ(value[1], static_cast<double>(i + 5));
+  }
+}
+
+
+TEST(Grid1D, TwoDataDimensionIntegerDataStacked) {
+  int x[] = {1, 2, 3,
+             5, 6, 7};
+
+  Grid1D<int, 2, false> grid(x, 0, 3);
+  for (int i = 0; i < 3; ++i) {
+    double value[2];
+    grid.GetValue(i, value);
+    EXPECT_EQ(value[0], static_cast<double>(i + 1));
+    EXPECT_EQ(value[1], static_cast<double>(i + 5));
+  }
+}
+
+TEST(Grid2D, OneDataDimensionRowMajor) {
+  int x[] = {1, 2, 3,
+             2, 3, 4};
+  Grid2D<int, 1, true, true> grid(x, 0, 2, 0, 3);
+  for (int r = 0; r < 2; ++r) {
+    for (int c = 0; c < 3; ++c) {
+      double value;
+      grid.GetValue(r, c, &value);
+      EXPECT_EQ(value, static_cast<double>(r + c + 1));
+    }
+  }
+}
+
+TEST(Grid2D, OneDataDimensionRowMajorOutOfBounds) {
+  int x[] = {1, 2, 3,
+             2, 3, 4};
+  Grid2D<int, 1, true, true> grid(x, 0, 2, 0, 3);
+  double value;
+  grid.GetValue(-1, -1, &value);
+  EXPECT_EQ(value, x[0]);
+  grid.GetValue(-1, 0, &value);
+  EXPECT_EQ(value, x[0]);
+  grid.GetValue(-1, 1, &value);
+  EXPECT_EQ(value, x[1]);
+  grid.GetValue(-1, 2, &value);
+  EXPECT_EQ(value, x[2]);
+  grid.GetValue(-1, 3, &value);
+  EXPECT_EQ(value, x[2]);
+  grid.GetValue(0, 3, &value);
+  EXPECT_EQ(value, x[2]);
+  grid.GetValue(1, 3, &value);
+  EXPECT_EQ(value, x[5]);
+  grid.GetValue(2, 3, &value);
+  EXPECT_EQ(value, x[5]);
+  grid.GetValue(2, 2, &value);
+  EXPECT_EQ(value, x[5]);
+  grid.GetValue(2, 1, &value);
+  EXPECT_EQ(value, x[4]);
+  grid.GetValue(2, 0, &value);
+  EXPECT_EQ(value, x[3]);
+  grid.GetValue(2, -1, &value);
+  EXPECT_EQ(value, x[3]);
+  grid.GetValue(1, -1, &value);
+  EXPECT_EQ(value, x[3]);
+  grid.GetValue(0, -1, &value);
+  EXPECT_EQ(value, x[0]);
+}
+
+TEST(Grid2D, TwoDataDimensionRowMajorInterleaved) {
+  int x[] = {1, 4, 2, 8, 3, 12,
+             2, 8, 3, 12, 4, 16};
+  Grid2D<int, 2, true, true> grid(x, 0, 2, 0, 3);
+  for (int r = 0; r < 2; ++r) {
+    for (int c = 0; c < 3; ++c) {
+      double value[2];
+      grid.GetValue(r, c, value);
+      EXPECT_EQ(value[0], static_cast<double>(r + c + 1));
+      EXPECT_EQ(value[1], static_cast<double>(4 * (r + c + 1)));
+    }
+  }
+}
+
+TEST(Grid2D, TwoDataDimensionRowMajorStacked) {
+  int x[] = {1,  2,  3,
+             2,  3,  4,
+             4,  8, 12,
+             8, 12, 16};
+  Grid2D<int, 2, true, false> grid(x, 0, 2, 0, 3);
+  for (int r = 0; r < 2; ++r) {
+    for (int c = 0; c < 3; ++c) {
+      double value[2];
+      grid.GetValue(r, c, value);
+      EXPECT_EQ(value[0], static_cast<double>(r + c + 1));
+      EXPECT_EQ(value[1], static_cast<double>(4 * (r + c + 1)));
+    }
+  }
+}
+
+TEST(Grid2D, TwoDataDimensionColMajorInterleaved) {
+  int x[] = { 1,  4, 2,  8,
+              2,  8, 3, 12,
+              3, 12, 4, 16};
+  Grid2D<int, 2, false, true> grid(x, 0, 2, 0, 3);
+  for (int r = 0; r < 2; ++r) {
+    for (int c = 0; c < 3; ++c) {
+      double value[2];
+      grid.GetValue(r, c, value);
+      EXPECT_EQ(value[0], static_cast<double>(r + c + 1));
+      EXPECT_EQ(value[1], static_cast<double>(4 * (r + c + 1)));
+    }
+  }
+}
+
+TEST(Grid2D, TwoDataDimensionColMajorStacked) {
+  int x[] = {1,   2,
+             2,   3,
+             3,   4,
+             4,   8,
+             8,  12,
+             12, 16};
+  Grid2D<int, 2, false, false> grid(x, 0, 2, 0, 3);
+  for (int r = 0; r < 2; ++r) {
+    for (int c = 0; c < 3; ++c) {
+      double value[2];
+      grid.GetValue(r, c, value);
+      EXPECT_EQ(value[0], static_cast<double>(r + c + 1));
+      EXPECT_EQ(value[1], static_cast<double>(4 * (r + c + 1)));
+    }
+  }
+}
+
+class CubicInterpolatorTest : public ::testing::Test {
+ public:
+  template <int kDataDimension>
+  void RunPolynomialInterpolationTest(const double a,
+                                      const double b,
+                                      const double c,
+                                      const double d) {
+    values_.reset(new double[kDataDimension * kNumSamples]);
+
+    for (int x = 0; x < kNumSamples; ++x) {
+      for (int dim = 0; dim < kDataDimension; ++dim) {
+        values_[x * kDataDimension + dim] =
+            (dim * dim + 1) * (a * x * x * x + b * x * x + c * x + d);
+      }
+    }
+
+    Grid1D<double, kDataDimension> grid(values_.get(), 0, kNumSamples);
+    CubicInterpolator<Grid1D<double, kDataDimension>> interpolator(grid);
+
+    // Check values in all the cells but the first and the last
+    // ones. In these cells, the interpolated function values should
+    // exactly match the values of the function being interpolated.
+    //
+    // In the boundary cells, the function is extrapolated on the
+    // basis of its first derivative, so the function values and
+    // derivatives are not expected to match there.
+    for (int j = 0; j < kNumTestSamples; ++j) {
+      const double x = 1.0 + 7.0 / (kNumTestSamples - 1) * j;
+      double expected_f[kDataDimension], expected_dfdx[kDataDimension];
+      double f[kDataDimension], dfdx[kDataDimension];
+
+      for (int dim = 0; dim < kDataDimension; ++dim) {
+        expected_f[dim] =
+            (dim * dim + 1) * (a * x * x * x + b * x * x + c * x + d);
+        expected_dfdx[dim] =
+            (dim * dim + 1) * (3.0 * a * x * x + 2.0 * b * x + c);
+      }
+
+      interpolator.Evaluate(x, f, dfdx);
+      for (int dim = 0; dim < kDataDimension; ++dim) {
+        EXPECT_NEAR(f[dim], expected_f[dim], kTolerance)
+            << "x: " << x << " dim: " << dim
+            << " actual f(x): " << expected_f[dim]
+            << " estimated f(x): " << f[dim];
+        EXPECT_NEAR(dfdx[dim], expected_dfdx[dim], kTolerance)
+            << "x: " << x << " dim: " << dim
+            << " actual df(x)/dx: " << expected_dfdx[dim]
+            << " estimated df(x)/dx: " << dfdx[dim];
+      }
+    }
+  }
+
+ private:
+  static const int kNumSamples = 10;
+  static const int kNumTestSamples = 100;
+  std::unique_ptr<double[]> values_;
+};
+
+TEST_F(CubicInterpolatorTest, ConstantFunction) {
+  RunPolynomialInterpolationTest<1>(0.0, 0.0, 0.0, 0.5);
+  RunPolynomialInterpolationTest<2>(0.0, 0.0, 0.0, 0.5);
+  RunPolynomialInterpolationTest<3>(0.0, 0.0, 0.0, 0.5);
+}
+
+TEST_F(CubicInterpolatorTest, LinearFunction) {
+  RunPolynomialInterpolationTest<1>(0.0, 0.0, 1.0, 0.5);
+  RunPolynomialInterpolationTest<2>(0.0, 0.0, 1.0, 0.5);
+  RunPolynomialInterpolationTest<3>(0.0, 0.0, 1.0, 0.5);
+}
+
+TEST_F(CubicInterpolatorTest, QuadraticFunction) {
+  RunPolynomialInterpolationTest<1>(0.0, 0.4, 1.0, 0.5);
+  RunPolynomialInterpolationTest<2>(0.0, 0.4, 1.0, 0.5);
+  RunPolynomialInterpolationTest<3>(0.0, 0.4, 1.0, 0.5);
+}
+
+
+TEST(CubicInterpolator, JetEvaluation) {
+  const double values[] = {1.0, 2.0, 2.0, 5.0, 3.0, 9.0, 2.0, 7.0};
+
+  Grid1D<double, 2, true> grid(values, 0, 4);
+  CubicInterpolator<Grid1D<double, 2, true>> interpolator(grid);
+
+  double f[2], dfdx[2];
+  const double x = 2.5;
+  interpolator.Evaluate(x, f, dfdx);
+
+  // Create a Jet with the same scalar part as x, so that the output
+  // Jet will be evaluated at x.
+  Jet<double, 4> x_jet;
+  x_jet.a = x;
+  x_jet.v(0) = 1.0;
+  x_jet.v(1) = 1.1;
+  x_jet.v(2) = 1.2;
+  x_jet.v(3) = 1.3;
+
+  Jet<double, 4> f_jets[2];
+  interpolator.Evaluate(x_jet, f_jets);
+
+  // Check that the scalar part of the Jet is f(x).
+  EXPECT_EQ(f_jets[0].a, f[0]);
+  EXPECT_EQ(f_jets[1].a, f[1]);
+
+  // Check that the derivative part of the Jet is dfdx * x_jet.v
+  // by the chain rule.
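+  // That is, for g(t) = f(x(t)) with dx/dt stored in x_jet.v, the chain
+  // rule gives dg/dt = dfdx * x_jet.v, which is what the infinitesimal
+  // part of the output Jet should contain.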
+  EXPECT_NEAR((f_jets[0].v - dfdx[0] * x_jet.v).norm(), 0.0, kTolerance);
+  EXPECT_NEAR((f_jets[1].v - dfdx[1] * x_jet.v).norm(), 0.0, kTolerance);
+}
+
+class BiCubicInterpolatorTest : public ::testing::Test {
+ public:
+  template <int kDataDimension>
+  void RunPolynomialInterpolationTest(const Eigen::Matrix3d& coeff) {
+    values_.reset(new double[kNumRows * kNumCols * kDataDimension]);
+    coeff_ = coeff;
+    double* v = values_.get();
+    for (int r = 0; r < kNumRows; ++r) {
+      for (int c = 0; c < kNumCols; ++c) {
+        for (int dim = 0; dim < kDataDimension; ++dim) {
+          *v++ = (dim * dim + 1) * EvaluateF(r, c);
+        }
+      }
+    }
+
+    Grid2D<double, kDataDimension> grid(values_.get(), 0, kNumRows, 0, kNumCols);
+    BiCubicInterpolator<Grid2D<double, kDataDimension>> interpolator(grid);
+
+    for (int j = 0; j < kNumRowSamples; ++j) {
+      const double r = 1.0 + 7.0 / (kNumRowSamples - 1) * j;
+      for (int k = 0; k < kNumColSamples; ++k) {
+        const double c = 1.0 + 7.0 / (kNumColSamples - 1) * k;
+        double f[kDataDimension], dfdr[kDataDimension], dfdc[kDataDimension];
+        interpolator.Evaluate(r, c, f, dfdr, dfdc);
+        for (int dim = 0; dim < kDataDimension; ++dim) {
+          EXPECT_NEAR(f[dim], (dim * dim + 1) * EvaluateF(r, c), kTolerance);
+          EXPECT_NEAR(dfdr[dim], (dim * dim + 1) * EvaluatedFdr(r, c), kTolerance);
+          EXPECT_NEAR(dfdc[dim], (dim * dim + 1) * EvaluatedFdc(r, c), kTolerance);
+        }
+      }
+    }
+  }
+
+ private:
+  double EvaluateF(double r, double c) {
+    Eigen::Vector3d x;
+    x(0) = r;
+    x(1) = c;
+    x(2) = 1;
+    return x.transpose() * coeff_ * x;
+  }
+
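+  // For F(r, c) = x' * coeff_ * x with x = (r, c, 1), the partial derivative
+  // with respect to r is (coeff_.row(0) + coeff_.col(0)') * x and the partial
+  // with respect to c is (coeff_.row(1) + coeff_.col(1)') * x; the two
+  // helpers below evaluate exactly these expressions.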
+  double EvaluatedFdr(double r, double c) {
+    Eigen::Vector3d x;
+    x(0) = r;
+    x(1) = c;
+    x(2) = 1;
+    return (coeff_.row(0) + coeff_.col(0).transpose()) * x;
+  }
+
+  double EvaluatedFdc(double r, double c) {
+    Eigen::Vector3d x;
+    x(0) = r;
+    x(1) = c;
+    x(2) = 1;
+    return (coeff_.row(1) + coeff_.col(1).transpose()) * x;
+  }
+
+
+  Eigen::Matrix3d coeff_;
+  static const int kNumRows = 10;
+  static const int kNumCols = 10;
+  static const int kNumRowSamples = 100;
+  static const int kNumColSamples = 100;
+  std::unique_ptr<double[]> values_;
+};
+
+TEST_F(BiCubicInterpolatorTest, ZeroFunction) {
+  Eigen::Matrix3d coeff = Eigen::Matrix3d::Zero();
+  RunPolynomialInterpolationTest<1>(coeff);
+  RunPolynomialInterpolationTest<2>(coeff);
+  RunPolynomialInterpolationTest<3>(coeff);
+}
+
+TEST_F(BiCubicInterpolatorTest, Degree00Function) {
+  Eigen::Matrix3d coeff = Eigen::Matrix3d::Zero();
+  coeff(2, 2) = 1.0;
+  RunPolynomialInterpolationTest<1>(coeff);
+  RunPolynomialInterpolationTest<2>(coeff);
+  RunPolynomialInterpolationTest<3>(coeff);
+}
+
+TEST_F(BiCubicInterpolatorTest, Degree01Function) {
+  Eigen::Matrix3d coeff = Eigen::Matrix3d::Zero();
+  coeff(2, 2) = 1.0;
+  coeff(0, 2) = 0.1;
+  coeff(2, 0) = 0.1;
+  RunPolynomialInterpolationTest<1>(coeff);
+  RunPolynomialInterpolationTest<2>(coeff);
+  RunPolynomialInterpolationTest<3>(coeff);
+}
+
+TEST_F(BiCubicInterpolatorTest, Degree10Function) {
+  Eigen::Matrix3d coeff = Eigen::Matrix3d::Zero();
+  coeff(2, 2) = 1.0;
+  coeff(0, 1) = 0.1;
+  coeff(1, 0) = 0.1;
+  RunPolynomialInterpolationTest<1>(coeff);
+  RunPolynomialInterpolationTest<2>(coeff);
+  RunPolynomialInterpolationTest<3>(coeff);
+}
+
+TEST_F(BiCubicInterpolatorTest, Degree11Function) {
+  Eigen::Matrix3d coeff = Eigen::Matrix3d::Zero();
+  coeff(2, 2) = 1.0;
+  coeff(0, 1) = 0.1;
+  coeff(1, 0) = 0.1;
+  coeff(0, 2) = 0.2;
+  coeff(2, 0) = 0.2;
+  RunPolynomialInterpolationTest<1>(coeff);
+  RunPolynomialInterpolationTest<2>(coeff);
+  RunPolynomialInterpolationTest<3>(coeff);
+}
+
+TEST_F(BiCubicInterpolatorTest, Degree12Function) {
+  Eigen::Matrix3d coeff = Eigen::Matrix3d::Zero();
+  coeff(2, 2) = 1.0;
+  coeff(0, 1) = 0.1;
+  coeff(1, 0) = 0.1;
+  coeff(0, 2) = 0.2;
+  coeff(2, 0) = 0.2;
+  coeff(1, 1) = 0.3;
+  RunPolynomialInterpolationTest<1>(coeff);
+  RunPolynomialInterpolationTest<2>(coeff);
+  RunPolynomialInterpolationTest<3>(coeff);
+}
+
+TEST_F(BiCubicInterpolatorTest, Degree21Function) {
+  Eigen::Matrix3d coeff = Eigen::Matrix3d::Zero();
+  coeff(2, 2) = 1.0;
+  coeff(0, 1) = 0.1;
+  coeff(1, 0) = 0.1;
+  coeff(0, 2) = 0.2;
+  coeff(2, 0) = 0.2;
+  coeff(0, 0) = 0.3;
+  RunPolynomialInterpolationTest<1>(coeff);
+  RunPolynomialInterpolationTest<2>(coeff);
+  RunPolynomialInterpolationTest<3>(coeff);
+}
+
+TEST_F(BiCubicInterpolatorTest, Degree22Function) {
+  Eigen::Matrix3d coeff = Eigen::Matrix3d::Zero();
+  coeff(2, 2) = 1.0;
+  coeff(0, 1) = 0.1;
+  coeff(1, 0) = 0.1;
+  coeff(0, 2) = 0.2;
+  coeff(2, 0) = 0.2;
+  coeff(0, 0) = 0.3;
+  coeff(0, 1) = -0.4;
+  coeff(1, 0) = -0.4;
+  RunPolynomialInterpolationTest<1>(coeff);
+  RunPolynomialInterpolationTest<2>(coeff);
+  RunPolynomialInterpolationTest<3>(coeff);
+}
+
+TEST(BiCubicInterpolator, JetEvaluation) {
+  const double values[] = {1.0, 5.0, 2.0, 10.0, 2.0, 6.0, 3.0, 5.0,
+                           1.0, 2.0, 2.0,  2.0, 2.0, 2.0, 3.0, 1.0};
+
+  Grid2D<double, 2> grid(values, 0, 2, 0, 4);
+  BiCubicInterpolator<Grid2D<double, 2>> interpolator(grid);
+
+  double f[2], dfdr[2], dfdc[2];
+  const double r = 0.5;
+  const double c = 2.5;
+  interpolator.Evaluate(r, c, f, dfdr, dfdc);
+
+  // Create Jets with the same scalar parts as r and c, so that the
+  // output Jets will be evaluated at (r, c).
+  Jet<double, 4> r_jet;
+  r_jet.a = r;
+  r_jet.v(0) = 1.0;
+  r_jet.v(1) = 1.1;
+  r_jet.v(2) = 1.2;
+  r_jet.v(3) = 1.3;
+
+  Jet<double, 4> c_jet;
+  c_jet.a = c;
+  c_jet.v(0) = 2.0;
+  c_jet.v(1) = 3.1;
+  c_jet.v(2) = 4.2;
+  c_jet.v(3) = 5.3;
+
+  Jet<double, 4> f_jets[2];
+  interpolator.Evaluate(r_jet, c_jet, f_jets);
+  EXPECT_EQ(f_jets[0].a, f[0]);
+  EXPECT_EQ(f_jets[1].a, f[1]);
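+
+  // By the chain rule, the infinitesimal part of each output Jet should be
+  // dfdr * r_jet.v + dfdc * c_jet.v.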
+  EXPECT_NEAR((f_jets[0].v - dfdr[0] * r_jet.v - dfdc[0] * c_jet.v).norm(),
+              0.0,
+              kTolerance);
+  EXPECT_NEAR((f_jets[1].v - dfdr[1] * r_jet.v - dfdc[1] * c_jet.v).norm(),
+              0.0,
+              kTolerance);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/cxsparse.cc b/internal/ceres/cxsparse.cc
new file mode 100644
index 0000000..5a02877
--- /dev/null
+++ b/internal/ceres/cxsparse.cc
@@ -0,0 +1,284 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: strandmark@google.com (Petter Strandmark)
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_NO_CXSPARSE
+
+#include "ceres/cxsparse.h"
+
+#include <string>
+#include <vector>
+
+#include "ceres/compressed_col_sparse_matrix_utils.h"
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+using std::vector;
+
+CXSparse::CXSparse() : scratch_(NULL), scratch_size_(0) {}
+
+CXSparse::~CXSparse() {
+  if (scratch_size_ > 0) {
+    cs_di_free(scratch_);
+  }
+}
+
+csn* CXSparse::Cholesky(cs_di* A, cs_dis* symbolic_factor) {
+  return cs_di_chol(A, symbolic_factor);
+}
+
+void CXSparse::Solve(cs_dis* symbolic_factor, csn* numeric_factor, double* b) {
+  // Make sure we have enough scratch space available.
+  const int num_cols = numeric_factor->L->n;
+  if (scratch_size_ < num_cols) {
+    if (scratch_size_ > 0) {
+      cs_di_free(scratch_);
+    }
+    scratch_ =
+        reinterpret_cast<CS_ENTRY*>(cs_di_malloc(num_cols, sizeof(CS_ENTRY)));
+    scratch_size_ = num_cols;
+  }
+
+  // When the Cholesky factorization succeeds, these methods are
+  // guaranteed to succeed as well. In the comments below, "x"
+  // refers to the scratch space.
+  //
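+  // Taken together, the four steps compute b <- P' * (L' \ (L \ (P * b))),
+  // i.e. they solve (P' * L * L' * P) * x = b in place.
+  //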
+  // Set x = P * b.
+  CHECK(cs_di_ipvec(symbolic_factor->pinv, b, scratch_, num_cols));
+  // Set x = L \ x.
+  CHECK(cs_di_lsolve(numeric_factor->L, scratch_));
+  // Set x = L' \ x.
+  CHECK(cs_di_ltsolve(numeric_factor->L, scratch_));
+  // Set b = P' * x.
+  CHECK(cs_di_pvec(symbolic_factor->pinv, scratch_, b, num_cols));
+}
+
+bool CXSparse::SolveCholesky(cs_di* lhs, double* rhs_and_solution) {
+  return cs_cholsol(1, lhs, rhs_and_solution);
+}
+
+cs_dis* CXSparse::AnalyzeCholesky(cs_di* A) {
+  // order = 1 for Cholesky factor.
+  return cs_schol(1, A);
+}
+
+cs_dis* CXSparse::AnalyzeCholeskyWithNaturalOrdering(cs_di* A) {
+  // order = 0 for Natural ordering.
+  return cs_schol(0, A);
+}
+
+cs_dis* CXSparse::BlockAnalyzeCholesky(cs_di* A,
+                                       const vector<int>& row_blocks,
+                                       const vector<int>& col_blocks) {
+  const int num_row_blocks = row_blocks.size();
+  const int num_col_blocks = col_blocks.size();
+
+  vector<int> block_rows;
+  vector<int> block_cols;
+  CompressedColumnScalarMatrixToBlockMatrix(
+      A->i, A->p, row_blocks, col_blocks, &block_rows, &block_cols);
+  cs_di block_matrix;
+  block_matrix.m = num_row_blocks;
+  block_matrix.n = num_col_blocks;
+  block_matrix.nz = -1;
+  block_matrix.nzmax = block_rows.size();
+  block_matrix.p = &block_cols[0];
+  block_matrix.i = &block_rows[0];
+  block_matrix.x = NULL;
+
+  int* ordering = cs_amd(1, &block_matrix);
+  vector<int> block_ordering(num_row_blocks, -1);
+  std::copy(ordering, ordering + num_row_blocks, &block_ordering[0]);
+  cs_free(ordering);
+
+  vector<int> scalar_ordering;
+  BlockOrderingToScalarOrdering(row_blocks, block_ordering, &scalar_ordering);
+
+  cs_dis* symbolic_factor =
+      reinterpret_cast<cs_dis*>(cs_calloc(1, sizeof(cs_dis)));
+  symbolic_factor->pinv = cs_pinv(&scalar_ordering[0], A->n);
+  cs* permuted_A = cs_symperm(A, symbolic_factor->pinv, 0);
+
+  symbolic_factor->parent = cs_etree(permuted_A, 0);
+  int* postordering = cs_post(symbolic_factor->parent, A->n);
+  int* column_counts =
+      cs_counts(permuted_A, symbolic_factor->parent, postordering, 0);
+  cs_free(postordering);
+  cs_spfree(permuted_A);
+
+  symbolic_factor->cp = (int*)cs_malloc(A->n + 1, sizeof(int));
+  symbolic_factor->lnz = cs_cumsum(symbolic_factor->cp, column_counts, A->n);
+  symbolic_factor->unz = symbolic_factor->lnz;
+
+  cs_free(column_counts);
+
+  if (symbolic_factor->lnz < 0) {
+    cs_sfree(symbolic_factor);
+    symbolic_factor = NULL;
+  }
+
+  return symbolic_factor;
+}
+
+cs_di CXSparse::CreateSparseMatrixTransposeView(CompressedRowSparseMatrix* A) {
+  cs_di At;
+  At.m = A->num_cols();
+  At.n = A->num_rows();
+  At.nz = -1;
+  At.nzmax = A->num_nonzeros();
+  At.p = A->mutable_rows();
+  At.i = A->mutable_cols();
+  At.x = A->mutable_values();
+  return At;
+}
+
+cs_di* CXSparse::CreateSparseMatrix(TripletSparseMatrix* tsm) {
+  cs_di_sparse tsm_wrapper;
+  tsm_wrapper.nzmax = tsm->num_nonzeros();
+  tsm_wrapper.nz = tsm->num_nonzeros();
+  tsm_wrapper.m = tsm->num_rows();
+  tsm_wrapper.n = tsm->num_cols();
+  tsm_wrapper.p = tsm->mutable_cols();
+  tsm_wrapper.i = tsm->mutable_rows();
+  tsm_wrapper.x = tsm->mutable_values();
+
+  return cs_compress(&tsm_wrapper);
+}
+
+void CXSparse::ApproximateMinimumDegreeOrdering(cs_di* A, int* ordering) {
+  int* cs_ordering = cs_amd(1, A);
+  std::copy(cs_ordering, cs_ordering + A->m, ordering);
+  cs_free(cs_ordering);
+}
+
+cs_di* CXSparse::TransposeMatrix(cs_di* A) { return cs_di_transpose(A, 1); }
+
+cs_di* CXSparse::MatrixMatrixMultiply(cs_di* A, cs_di* B) {
+  return cs_di_multiply(A, B);
+}
+
+void CXSparse::Free(cs_di* sparse_matrix) { cs_di_spfree(sparse_matrix); }
+
+void CXSparse::Free(cs_dis* symbolic_factor) { cs_di_sfree(symbolic_factor); }
+
+void CXSparse::Free(csn* numeric_factor) { cs_di_nfree(numeric_factor); }
+
+std::unique_ptr<SparseCholesky> CXSparseCholesky::Create(
+    const OrderingType ordering_type) {
+  return std::unique_ptr<SparseCholesky>(new CXSparseCholesky(ordering_type));
+}
+
+CompressedRowSparseMatrix::StorageType CXSparseCholesky::StorageType() const {
+  return CompressedRowSparseMatrix::LOWER_TRIANGULAR;
+}
+
+CXSparseCholesky::CXSparseCholesky(const OrderingType ordering_type)
+    : ordering_type_(ordering_type),
+      symbolic_factor_(NULL),
+      numeric_factor_(NULL) {}
+
+CXSparseCholesky::~CXSparseCholesky() {
+  FreeSymbolicFactorization();
+  FreeNumericFactorization();
+}
+
+LinearSolverTerminationType CXSparseCholesky::Factorize(
+    CompressedRowSparseMatrix* lhs, std::string* message) {
+  if (lhs == NULL) {
+    *message = "Failure: Input lhs is NULL.";
+    return LINEAR_SOLVER_FATAL_ERROR;
+  }
+  CHECK_EQ(lhs->storage_type(), StorageType());
+
+  cs_di cs_lhs = cs_.CreateSparseMatrixTransposeView(lhs);
+
+  if (symbolic_factor_ == NULL) {
+    if (ordering_type_ == NATURAL) {
+      symbolic_factor_ = cs_.AnalyzeCholeskyWithNaturalOrdering(&cs_lhs);
+    } else {
+      if (!lhs->col_blocks().empty() && !(lhs->row_blocks().empty())) {
+        symbolic_factor_ = cs_.BlockAnalyzeCholesky(
+            &cs_lhs, lhs->col_blocks(), lhs->row_blocks());
+      } else {
+        symbolic_factor_ = cs_.AnalyzeCholesky(&cs_lhs);
+      }
+    }
+
+    if (symbolic_factor_ == NULL) {
+      *message = "CXSparse Failure : Symbolic factorization failed.";
+      return LINEAR_SOLVER_FATAL_ERROR;
+    }
+  }
+
+  FreeNumericFactorization();
+  numeric_factor_ = cs_.Cholesky(&cs_lhs, symbolic_factor_);
+  if (numeric_factor_ == NULL) {
+    *message = "CXSparse Failure : Numeric factorization failed.";
+    return LINEAR_SOLVER_FAILURE;
+  }
+
+  return LINEAR_SOLVER_SUCCESS;
+}
+
+LinearSolverTerminationType CXSparseCholesky::Solve(const double* rhs,
+                                                    double* solution,
+                                                    std::string* message) {
+  CHECK(numeric_factor_ != NULL)
+      << "Solve called without a call to Factorize first.";
+  const int num_cols = numeric_factor_->L->n;
+  memcpy(solution, rhs, num_cols * sizeof(*solution));
+  cs_.Solve(symbolic_factor_, numeric_factor_, solution);
+  return LINEAR_SOLVER_SUCCESS;
+}
+
+void CXSparseCholesky::FreeSymbolicFactorization() {
+  if (symbolic_factor_ != NULL) {
+    cs_.Free(symbolic_factor_);
+    symbolic_factor_ = NULL;
+  }
+}
+
+void CXSparseCholesky::FreeNumericFactorization() {
+  if (numeric_factor_ != NULL) {
+    cs_.Free(numeric_factor_);
+    numeric_factor_ = NULL;
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_CXSPARSE
diff --git a/internal/ceres/cxsparse.h b/internal/ceres/cxsparse.h
new file mode 100644
index 0000000..28238d5
--- /dev/null
+++ b/internal/ceres/cxsparse.h
@@ -0,0 +1,179 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: strandmark@google.com (Petter Strandmark)
+
+#ifndef CERES_INTERNAL_CXSPARSE_H_
+#define CERES_INTERNAL_CXSPARSE_H_
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_NO_CXSPARSE
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "ceres/linear_solver.h"
+#include "ceres/sparse_cholesky.h"
+#include "cs.h"
+
+namespace ceres {
+namespace internal {
+
+class CompressedRowSparseMatrix;
+class TripletSparseMatrix;
+
+// This object provides access to solving linear systems using Cholesky
+// factorization with a known symbolic factorization. This feature does not
+// exist explicitly in CXSparse. The methods in the class are non-static
+// because the class manages internal scratch space.
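+//
+// A minimal usage sketch (illustrative; A is assumed to be a cs_di matrix in
+// compressed column form and rhs_and_solution to hold the right hand side on
+// input):
+//
+//   CXSparse cxsparse;
+//   cs_dis* symbolic_factor = cxsparse.AnalyzeCholesky(A);
+//   csn* numeric_factor = cxsparse.Cholesky(A, symbolic_factor);
+//   cxsparse.Solve(symbolic_factor, numeric_factor, rhs_and_solution);
+//   cxsparse.Free(numeric_factor);
+//   cxsparse.Free(symbolic_factor);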
+class CXSparse {
+ public:
+  CXSparse();
+  ~CXSparse();
+
+  // Solve the system lhs * solution = rhs in place by using an
+  // approximate minimum degree fill reducing ordering.
+  bool SolveCholesky(cs_di* lhs, double* rhs_and_solution);
+
+  // Solves a linear system given its symbolic and numeric factorization.
+  void Solve(cs_dis* symbolic_factor,
+             csn* numeric_factor,
+             double* rhs_and_solution);
+
+  // Compute the numeric Cholesky factorization of A, given its
+  // symbolic factorization.
+  //
+  // Caller owns the result.
+  csn* Cholesky(cs_di* A, cs_dis* symbolic_factor);
+
+  // Creates a compressed-column view of the transpose of A. No memory is
+  // allocated or copied; the returned struct simply points into the data
+  // owned by the argument.
+  cs_di CreateSparseMatrixTransposeView(CompressedRowSparseMatrix* A);
+
+  // Creates a new matrix from a triplet form. Deallocate the returned matrix
+  // with Free. May return NULL if the compression or allocation fails.
+  cs_di* CreateSparseMatrix(TripletSparseMatrix* A);
+
+  // B = A'
+  //
+  // The returned matrix should be deallocated with Free when not used
+  // anymore.
+  cs_di* TransposeMatrix(cs_di* A);
+
+  // C = A * B
+  //
+  // The returned matrix should be deallocated with Free when not used
+  // anymore.
+  cs_di* MatrixMatrixMultiply(cs_di* A, cs_di* B);
+
+  // Computes a symbolic factorization of A that can be used in Cholesky
+  // and Solve.
+  //
+  // The returned matrix should be deallocated with Free when not used anymore.
+  cs_dis* AnalyzeCholesky(cs_di* A);
+
+  // Computes a symbolic factorization of A that can be used in
+  // Cholesky and Solve, but does not compute a fill-reducing ordering.
+  //
+  // The returned matrix should be deallocated with Free when not used anymore.
+  cs_dis* AnalyzeCholeskyWithNaturalOrdering(cs_di* A);
+
+  // Computes a symbolic factorization of A that can be used in
+  // Cholesky and Solve. The difference from AnalyzeCholesky is that
+  // this function first detects the block sparsity of the matrix using
+  // information about the row and column blocks and uses this block
+  // sparse matrix to find a fill-reducing ordering. This ordering is
+  // then used to find a symbolic factorization. This can result in a
+  // significant performance improvement over AnalyzeCholesky on block
+  // sparse matrices.
+  //
+  // The returned matrix should be deallocated with Free when not used
+  // anymore.
+  cs_dis* BlockAnalyzeCholesky(cs_di* A,
+                               const std::vector<int>& row_blocks,
+                               const std::vector<int>& col_blocks);
+
+  // Computes a fill-reducing approximate minimum degree ordering of
+  // the matrix A. ordering must be non-NULL and point to enough
+  // memory to hold the ordering for the rows of A.
+  void ApproximateMinimumDegreeOrdering(cs_di* A, int* ordering);
+
+  void Free(cs_di* sparse_matrix);
+  void Free(cs_dis* symbolic_factorization);
+  void Free(csn* numeric_factorization);
+
+ private:
+  // Cached scratch space
+  CS_ENTRY* scratch_;
+  int scratch_size_;
+};
+
+// An implementation of the SparseCholesky interface using the CXSparse
+// library.
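+//
+// A minimal usage sketch (illustrative; lhs is assumed to be a
+// CompressedRowSparseMatrix with LOWER_TRIANGULAR storage and rhs/solution
+// to be arrays of the right size):
+//
+//   std::unique_ptr<SparseCholesky> cholesky =
+//       CXSparseCholesky::Create(NATURAL);
+//   std::string message;
+//   if (cholesky->Factorize(lhs, &message) == LINEAR_SOLVER_SUCCESS) {
+//     cholesky->Solve(rhs, solution, &message);
+//   }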
+class CXSparseCholesky : public SparseCholesky {
+ public:
+  // Factory
+  static std::unique_ptr<SparseCholesky> Create(OrderingType ordering_type);
+
+  // SparseCholesky interface.
+  virtual ~CXSparseCholesky();
+  virtual CompressedRowSparseMatrix::StorageType StorageType() const;
+  virtual LinearSolverTerminationType Factorize(CompressedRowSparseMatrix* lhs,
+                                                std::string* message);
+  virtual LinearSolverTerminationType Solve(const double* rhs,
+                                            double* solution,
+                                            std::string* message);
+
+ private:
+  CXSparseCholesky(const OrderingType ordering_type);
+  void FreeSymbolicFactorization();
+  void FreeNumericFactorization();
+
+  const OrderingType ordering_type_;
+  CXSparse cs_;
+  cs_dis* symbolic_factor_;
+  csn* numeric_factor_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#else   // CERES_NO_CXSPARSE
+
+typedef void cs_dis;
+
+class CXSparse {
+ public:
+  void Free(void* arg) {}
+};
+#endif  // CERES_NO_CXSPARSE
+
+#endif  // CERES_INTERNAL_CXSPARSE_H_
diff --git a/internal/ceres/dense_jacobian_writer.h b/internal/ceres/dense_jacobian_writer.h
new file mode 100644
index 0000000..1b04f38
--- /dev/null
+++ b/internal/ceres/dense_jacobian_writer.h
@@ -0,0 +1,108 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// A jacobian writer that writes to dense Eigen matrices.
+
+#ifndef CERES_INTERNAL_DENSE_JACOBIAN_WRITER_H_
+#define CERES_INTERNAL_DENSE_JACOBIAN_WRITER_H_
+
+#include "ceres/casts.h"
+#include "ceres/dense_sparse_matrix.h"
+#include "ceres/parameter_block.h"
+#include "ceres/program.h"
+#include "ceres/residual_block.h"
+#include "ceres/scratch_evaluate_preparer.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+class DenseJacobianWriter {
+ public:
+  DenseJacobianWriter(Evaluator::Options /* ignored */,
+                      Program* program)
+    : program_(program) {
+  }
+
+  // JacobianWriter interface.
+
+  // Since the dense matrix has a different layout than the one assumed by the
+  // cost functions, use scratch space to store the jacobians temporarily and
+  // then copy them over to the larger jacobian later.
+  ScratchEvaluatePreparer* CreateEvaluatePreparers(int num_threads) {
+    return ScratchEvaluatePreparer::Create(*program_, num_threads);
+  }
+
+  SparseMatrix* CreateJacobian() const {
+    return new DenseSparseMatrix(program_->NumResiduals(),
+                                 program_->NumEffectiveParameters(),
+                                 true);
+  }
+
+  void Write(int residual_id,
+             int residual_offset,
+             double **jacobians,
+             SparseMatrix* jacobian) {
+    DenseSparseMatrix* dense_jacobian = down_cast<DenseSparseMatrix*>(jacobian);
+    const ResidualBlock* residual_block =
+        program_->residual_blocks()[residual_id];
+    int num_parameter_blocks = residual_block->NumParameterBlocks();
+    int num_residuals = residual_block->NumResiduals();
+
+    // Now copy the jacobians for each parameter into the dense jacobian matrix.
+    for (int j = 0; j < num_parameter_blocks; ++j) {
+      ParameterBlock* parameter_block = residual_block->parameter_blocks()[j];
+
+      // If the parameter block is fixed, then there is nothing to do.
+      if (parameter_block->IsConstant()) {
+        continue;
+      }
+
+      const int parameter_block_size = parameter_block->LocalSize();
+      ConstMatrixRef parameter_jacobian(jacobians[j],
+                                        num_residuals,
+                                        parameter_block_size);
+
+      dense_jacobian->mutable_matrix().block(
+          residual_offset,
+          parameter_block->delta_offset(),
+          num_residuals,
+          parameter_block_size) = parameter_jacobian;
+    }
+  }
+
+ private:
+  Program* program_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_DENSE_JACOBIAN_WRITER_H_
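A standalone Eigen sketch of the block copy performed by Write() for one parameter block: the per-residual-block jacobian produced by the cost function is placed at (residual_offset, delta_offset) inside the full dense jacobian. The sizes and offsets below are placeholders.

    #include "Eigen/Core"

    void BlockCopySketch() {
      // NumResiduals() x NumEffectiveParameters() for the whole program.
      Eigen::MatrixXd dense_jacobian = Eigen::MatrixXd::Zero(10, 8);
      // num_residuals x LocalSize() for one residual/parameter block pair.
      Eigen::MatrixXd parameter_jacobian = Eigen::MatrixXd::Constant(2, 3, 1.0);
      const int residual_offset = 4;  // Row at which this residual block starts.
      const int delta_offset = 5;     // Column at which this parameter block starts.
      dense_jacobian.block(residual_offset, delta_offset, 2, 3) = parameter_jacobian;
    }
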
diff --git a/internal/ceres/dense_linear_solver_test.cc b/internal/ceres/dense_linear_solver_test.cc
new file mode 100644
index 0000000..e2e02ca
--- /dev/null
+++ b/internal/ceres/dense_linear_solver_test.cc
@@ -0,0 +1,137 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <memory>
+#include "ceres/casts.h"
+#include "ceres/context_impl.h"
+#include "ceres/linear_least_squares_problems.h"
+#include "ceres/linear_solver.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+typedef ::testing::
+    tuple<LinearSolverType, DenseLinearAlgebraLibraryType, bool, int>
+        Param;
+
+std::string ParamInfoToString(testing::TestParamInfo<Param> info) {
+  Param param = info.param;
+  std::stringstream ss;
+  ss << LinearSolverTypeToString(::testing::get<0>(param)) << "_"
+     << DenseLinearAlgebraLibraryTypeToString(::testing::get<1>(param)) << "_"
+     << (::testing::get<2>(param) ? "Regularized" : "Unregularized") << "_"
+     << ::testing::get<3>(param);
+  return ss.str();
+}
+
+class DenseLinearSolverTest : public ::testing::TestWithParam<Param> {};
+
+TEST_P(DenseLinearSolverTest, _) {
+  Param param = GetParam();
+  const bool regularized = testing::get<2>(param);
+
+  std::unique_ptr<LinearLeastSquaresProblem> problem(
+      CreateLinearLeastSquaresProblemFromId(testing::get<3>(param)));
+  DenseSparseMatrix lhs(*down_cast<TripletSparseMatrix*>(problem->A.get()));
+
+  const int num_cols = lhs.num_cols();
+  const int num_rows = lhs.num_rows();
+
+  Vector rhs = Vector::Zero(num_rows + num_cols);
+  rhs.head(num_rows) = ConstVectorRef(problem->b.get(), num_rows);
+
+  LinearSolver::Options options;
+  options.type = ::testing::get<0>(param);
+  options.dense_linear_algebra_library_type = ::testing::get<1>(param);
+  ContextImpl context;
+  options.context = &context;
+  std::unique_ptr<LinearSolver> solver(LinearSolver::Create(options));
+
+  LinearSolver::PerSolveOptions per_solve_options;
+  if (regularized) {
+    per_solve_options.D = problem->D.get();
+  }
+
+  Vector solution(num_cols);
+  LinearSolver::Summary summary =
+      solver->Solve(&lhs, rhs.data(), per_solve_options, solution.data());
+  EXPECT_EQ(summary.termination_type, LINEAR_SOLVER_SUCCESS);
+
+  // If solving for the regularized solution, add the diagonal to the
+  // matrix. This makes subsequent computations simpler.
+  if (regularized) {
+    lhs.AppendDiagonal(problem->D.get());
+  }
+
+  Vector tmp = Vector::Zero(num_rows + num_cols);
+  lhs.RightMultiply(solution.data(), tmp.data());
+  Vector actual_normal_rhs = Vector::Zero(num_cols);
+  lhs.LeftMultiply(tmp.data(), actual_normal_rhs.data());
+
+  Vector expected_normal_rhs = Vector::Zero(num_cols);
+  lhs.LeftMultiply(rhs.data(), expected_normal_rhs.data());
+  const double residual = (expected_normal_rhs - actual_normal_rhs).norm() /
+                          expected_normal_rhs.norm();
+
+  EXPECT_NEAR(residual, 0.0, 10 * std::numeric_limits<double>::epsilon());
+}
+
+// TODO(sameeragarwal): Should we move away from hard coded linear
+// least squares problem to randomly generated ones?
+#ifndef CERES_NO_LAPACK
+
+INSTANTIATE_TEST_CASE_P(
+    DenseLinearSolver,
+    DenseLinearSolverTest,
+    ::testing::Combine(::testing::Values(DENSE_QR, DENSE_NORMAL_CHOLESKY),
+                       ::testing::Values(EIGEN, LAPACK),
+                       ::testing::Values(true, false),
+                       ::testing::Values(0, 1)),
+    ParamInfoToString);
+
+#else
+
+INSTANTIATE_TEST_CASE_P(
+    DenseLinearSolver,
+    DenseLinearSolverTest,
+    ::testing::Combine(::testing::Values(DENSE_QR, DENSE_NORMAL_CHOLESKY),
+                       ::testing::Values(EIGEN),
+                       ::testing::Values(true, false),
+                       ::testing::Values(0, 1)),
+    ParamInfoToString);
+
+#endif
+
+}  // namespace internal
+}  // namespace ceres
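The test above avoids comparing x against a reference solution; instead it verifies that x satisfies the normal equations by comparing A'(Ax) with A'b in a relative sense. A plain-Eigen sketch of the same check on random data (not part of the test, sizes are placeholders):

    #include "Eigen/Dense"

    double NormalEquationResidual() {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(6, 3);
      Eigen::VectorXd b = Eigen::VectorXd::Random(6);
      // Any least squares solver can produce x; LDLT on A'A is used here only
      // to keep the sketch short.
      Eigen::VectorXd x = (A.transpose() * A).ldlt().solve(A.transpose() * b);
      Eigen::VectorXd actual = A.transpose() * (A * x);
      Eigen::VectorXd expected = A.transpose() * b;
      return (expected - actual).norm() / expected.norm();
    }
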
diff --git a/internal/ceres/dense_normal_cholesky_solver.cc b/internal/ceres/dense_normal_cholesky_solver.cc
new file mode 100644
index 0000000..fe7d931
--- /dev/null
+++ b/internal/ceres/dense_normal_cholesky_solver.cc
@@ -0,0 +1,165 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/dense_normal_cholesky_solver.h"
+
+#include <cstddef>
+
+#include "Eigen/Dense"
+#include "ceres/blas.h"
+#include "ceres/dense_sparse_matrix.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/lapack.h"
+#include "ceres/linear_solver.h"
+#include "ceres/types.h"
+#include "ceres/wall_time.h"
+
+namespace ceres {
+namespace internal {
+
+DenseNormalCholeskySolver::DenseNormalCholeskySolver(
+    const LinearSolver::Options& options)
+    : options_(options) {}
+
+LinearSolver::Summary DenseNormalCholeskySolver::SolveImpl(
+    DenseSparseMatrix* A,
+    const double* b,
+    const LinearSolver::PerSolveOptions& per_solve_options,
+    double* x) {
+  if (options_.dense_linear_algebra_library_type == EIGEN) {
+    return SolveUsingEigen(A, b, per_solve_options, x);
+  } else {
+    return SolveUsingLAPACK(A, b, per_solve_options, x);
+  }
+}
+
+LinearSolver::Summary DenseNormalCholeskySolver::SolveUsingEigen(
+    DenseSparseMatrix* A,
+    const double* b,
+    const LinearSolver::PerSolveOptions& per_solve_options,
+    double* x) {
+  EventLogger event_logger("DenseNormalCholeskySolver::Solve");
+
+  const int num_rows = A->num_rows();
+  const int num_cols = A->num_cols();
+
+  ConstColMajorMatrixRef Aref = A->matrix();
+  Matrix lhs(num_cols, num_cols);
+  lhs.setZero();
+
+  event_logger.AddEvent("Setup");
+
+  //   lhs += A'A
+  //
+  // Using rankUpdate instead of GEMM exposes the fact that it is the
+  // same matrix being multiplied by itself and that the product is
+  // symmetric.
+  lhs.selfadjointView<Eigen::Upper>().rankUpdate(Aref.transpose());
+
+  //   rhs = A'b
+  Vector rhs = Aref.transpose() * ConstVectorRef(b, num_rows);
+
+  if (per_solve_options.D != NULL) {
+    ConstVectorRef D(per_solve_options.D, num_cols);
+    lhs += D.array().square().matrix().asDiagonal();
+  }
+  event_logger.AddEvent("Product");
+
+  LinearSolver::Summary summary;
+  summary.num_iterations = 1;
+  Eigen::LLT<Matrix, Eigen::Upper> llt =
+      lhs.selfadjointView<Eigen::Upper>().llt();
+
+  if (llt.info() != Eigen::Success) {
+    summary.termination_type = LINEAR_SOLVER_FAILURE;
+    summary.message = "Eigen LLT decomposition failed.";
+  } else {
+    summary.termination_type = LINEAR_SOLVER_SUCCESS;
+    summary.message = "Success.";
+    // Only back substitute if the factorization succeeded; solving with a
+    // failed factorization would produce garbage.
+    VectorRef(x, num_cols) = llt.solve(rhs);
+  }
+  event_logger.AddEvent("Solve");
+  return summary;
+}
+
+LinearSolver::Summary DenseNormalCholeskySolver::SolveUsingLAPACK(
+    DenseSparseMatrix* A,
+    const double* b,
+    const LinearSolver::PerSolveOptions& per_solve_options,
+    double* x) {
+  EventLogger event_logger("DenseNormalCholeskySolver::Solve");
+
+  if (per_solve_options.D != NULL) {
+    // Temporarily append a diagonal block to the A matrix, but undo
+    // it before returning the matrix to the user.
+    A->AppendDiagonal(per_solve_options.D);
+  }
+
+  const int num_cols = A->num_cols();
+  Matrix lhs(num_cols, num_cols);
+  event_logger.AddEvent("Setup");
+
+  // lhs = A'A
+  //
+  // Note: This is a bit delicate; it assumes that the stride of this
+  // matrix is the same as its number of rows.
+  BLAS::SymmetricRankKUpdate(A->num_rows(),
+                             num_cols,
+                             A->values(),
+                             true,
+                             1.0,
+                             0.0,
+                             lhs.data());
+
+  if (per_solve_options.D != NULL) {
+    // Undo the modifications to the matrix A.
+    A->RemoveDiagonal();
+  }
+
+  // TODO(sameeragarwal): Replace this with a gemv call for true blasness.
+  //   rhs = A'b
+  VectorRef(x, num_cols) =
+      A->matrix().transpose() * ConstVectorRef(b, A->num_rows());
+  event_logger.AddEvent("Product");
+
+  LinearSolver::Summary summary;
+  summary.num_iterations = 1;
+  summary.termination_type =
+      LAPACK::SolveInPlaceUsingCholesky(num_cols,
+                                        lhs.data(),
+                                        x,
+                                        &summary.message);
+  event_logger.AddEvent("Solve");
+  return summary;
+}
+}   // namespace internal
+}   // namespace ceres
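A condensed, standalone sketch of the Eigen code path above: form the normal equations with a symmetric rank update, add the squared regularization diagonal, and solve with an LLT factorization. The function name and sizes are placeholders, not part of the patch.

    #include "Eigen/Dense"

    Eigen::VectorXd RegularizedNormalCholesky(const Eigen::MatrixXd& A,
                                              const Eigen::VectorXd& b,
                                              const Eigen::VectorXd& D) {
      const int num_cols = A.cols();
      Eigen::MatrixXd lhs = Eigen::MatrixXd::Zero(num_cols, num_cols);
      // lhs += A'A, computed only in the upper triangle.
      lhs.selfadjointView<Eigen::Upper>().rankUpdate(A.transpose());
      // lhs += diag(D)^2, the contribution of the [A; diag(D)] augmentation.
      lhs += D.array().square().matrix().asDiagonal();
      const Eigen::VectorXd rhs = A.transpose() * b;
      return lhs.selfadjointView<Eigen::Upper>().llt().solve(rhs);
    }
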
diff --git a/internal/ceres/dense_normal_cholesky_solver.h b/internal/ceres/dense_normal_cholesky_solver.h
new file mode 100644
index 0000000..c10bd7b
--- /dev/null
+++ b/internal/ceres/dense_normal_cholesky_solver.h
@@ -0,0 +1,105 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Solve dense rectangular systems Ax = b by forming the normal
+// equations and solving them using the Cholesky factorization.
+
+#ifndef CERES_INTERNAL_DENSE_NORMAL_CHOLESKY_SOLVER_H_
+#define CERES_INTERNAL_DENSE_NORMAL_CHOLESKY_SOLVER_H_
+
+#include "ceres/linear_solver.h"
+
+namespace ceres {
+namespace internal {
+
+class DenseSparseMatrix;
+
+// This class implements the LinearSolver interface for solving
+// rectangular/unsymmetric (well constrained) linear systems of the
+// form
+//
+//   Ax = b
+//
+// Since there does not usually exist a solution that satisfies these
+// equations, the solver instead solves the linear least squares
+// problem
+//
+//   min_x |Ax - b|^2
+//
+// Setting the gradient of the above optimization problem to zero
+// gives us the normal equations
+//
+//   A'Ax = A'b
+//
+// A'A is a positive definite matrix (hopefully), and the resulting
+// linear system can be solved using Cholesky factorization.
+//
+// If the PerSolveOptions struct has a non-null array D, then the
+// augmented/regularized linear system
+//
+//   [    A    ]x = [b]
+//   [ diag(D) ]    [0]
+//
+// is solved.
+//
+// This class uses the dense Cholesky (LLT) factorization routines from
+// the Eigen library or LAPACK, depending on the options. This solver
+// always returns a solution; it is the user's responsibility to judge
+// whether the solution is good enough for their purposes.
+class DenseNormalCholeskySolver: public DenseSparseMatrixSolver {
+ public:
+  explicit DenseNormalCholeskySolver(const LinearSolver::Options& options);
+
+ private:
+  virtual LinearSolver::Summary SolveImpl(
+      DenseSparseMatrix* A,
+      const double* b,
+      const LinearSolver::PerSolveOptions& per_solve_options,
+      double* x);
+
+  LinearSolver::Summary SolveUsingLAPACK(
+      DenseSparseMatrix* A,
+      const double* b,
+      const LinearSolver::PerSolveOptions& per_solve_options,
+      double* x);
+
+  LinearSolver::Summary SolveUsingEigen(
+      DenseSparseMatrix* A,
+      const double* b,
+      const LinearSolver::PerSolveOptions& per_solve_options,
+      double* x);
+
+  const LinearSolver::Options options_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_DENSE_NORMAL_CHOLESKY_SOLVER_H_
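For reference, expanding the augmented problem described in the comment above and setting its gradient to zero gives the regularized normal equations that the implementation assembles; this is standard least squares algebra, not part of the patch:

    \min_x \left\| \begin{bmatrix} A \\ \operatorname{diag}(D) \end{bmatrix} x
                 - \begin{bmatrix} b \\ 0 \end{bmatrix} \right\|^2
      = \min_x \left( \|Ax - b\|^2 + \|\operatorname{diag}(D)\,x\|^2 \right)
    \quad\Longrightarrow\quad
    \left( A^\top A + \operatorname{diag}(D)^2 \right) x = A^\top b .
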
diff --git a/internal/ceres/dense_qr_solver.cc b/internal/ceres/dense_qr_solver.cc
new file mode 100644
index 0000000..161e9c6
--- /dev/null
+++ b/internal/ceres/dense_qr_solver.cc
@@ -0,0 +1,168 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/dense_qr_solver.h"
+
+#include <cstddef>
+#include "Eigen/Dense"
+#include "ceres/dense_sparse_matrix.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/lapack.h"
+#include "ceres/linear_solver.h"
+#include "ceres/types.h"
+#include "ceres/wall_time.h"
+
+namespace ceres {
+namespace internal {
+
+DenseQRSolver::DenseQRSolver(const LinearSolver::Options& options)
+    : options_(options) {
+  work_.resize(1);
+}
+
+LinearSolver::Summary DenseQRSolver::SolveImpl(
+    DenseSparseMatrix* A,
+    const double* b,
+    const LinearSolver::PerSolveOptions& per_solve_options,
+    double* x) {
+  if (options_.dense_linear_algebra_library_type == EIGEN) {
+    return SolveUsingEigen(A, b, per_solve_options, x);
+  } else {
+    return SolveUsingLAPACK(A, b, per_solve_options, x);
+  }
+}
+
+LinearSolver::Summary DenseQRSolver::SolveUsingLAPACK(
+    DenseSparseMatrix* A,
+    const double* b,
+    const LinearSolver::PerSolveOptions& per_solve_options,
+    double* x) {
+  EventLogger event_logger("DenseQRSolver::Solve");
+
+  const int num_rows = A->num_rows();
+  const int num_cols = A->num_cols();
+
+  if (per_solve_options.D != NULL) {
+    // Temporarily append a diagonal block to the A matrix, but undo
+    // it before returning the matrix to the user.
+    A->AppendDiagonal(per_solve_options.D);
+  }
+
+  // TODO(sameeragarwal): Since we are copying anyway, the diagonal
+  // can be appended to the copy instead of modifying A.
+  lhs_ = A->matrix();
+
+  if (per_solve_options.D != NULL) {
+    // Undo the modifications to the matrix A.
+    A->RemoveDiagonal();
+  }
+
+  // rhs = [b;0] to account for the additional rows in the lhs.
+  if (rhs_.rows() != lhs_.rows()) {
+    rhs_.resize(lhs_.rows());
+  }
+  rhs_.setZero();
+  rhs_.head(num_rows) = ConstVectorRef(b, num_rows);
+
+  if (work_.rows() == 1) {
+    const int work_size =
+        LAPACK::EstimateWorkSizeForQR(lhs_.rows(), lhs_.cols());
+    VLOG(3) << "Working memory for Dense QR factorization: "
+            << work_size * sizeof(double);
+    work_.resize(work_size);
+  }
+
+  LinearSolver::Summary summary;
+  summary.num_iterations = 1;
+  summary.termination_type = LAPACK::SolveInPlaceUsingQR(lhs_.rows(),
+                                                         lhs_.cols(),
+                                                         lhs_.data(),
+                                                         work_.rows(),
+                                                         work_.data(),
+                                                         rhs_.data(),
+                                                         &summary.message);
+  event_logger.AddEvent("Solve");
+  if (summary.termination_type == LINEAR_SOLVER_SUCCESS) {
+    VectorRef(x, num_cols) = rhs_.head(num_cols);
+  }
+
+  event_logger.AddEvent("TearDown");
+  return summary;
+}
+
+LinearSolver::Summary DenseQRSolver::SolveUsingEigen(
+    DenseSparseMatrix* A,
+    const double* b,
+    const LinearSolver::PerSolveOptions& per_solve_options,
+    double* x) {
+  EventLogger event_logger("DenseQRSolver::Solve");
+
+  const int num_rows = A->num_rows();
+  const int num_cols = A->num_cols();
+
+  if (per_solve_options.D != NULL) {
+    // Temporarily append a diagonal block to the A matrix, but undo
+    // it before returning the matrix to the user.
+    A->AppendDiagonal(per_solve_options.D);
+  }
+
+  // rhs = [b;0] to account for the additional rows in the lhs.
+  const int augmented_num_rows =
+      num_rows + ((per_solve_options.D != NULL) ? num_cols : 0);
+  if (rhs_.rows() != augmented_num_rows) {
+    rhs_.resize(augmented_num_rows);
+    rhs_.setZero();
+  }
+  rhs_.head(num_rows) = ConstVectorRef(b, num_rows);
+  event_logger.AddEvent("Setup");
+
+  // Solve the system.
+  VectorRef(x, num_cols) = A->matrix().householderQr().solve(rhs_);
+  event_logger.AddEvent("Solve");
+
+  if (per_solve_options.D != NULL) {
+    // Undo the modifications to the matrix A.
+    A->RemoveDiagonal();
+  }
+
+  // We always succeed, since the QR solver returns the best solution
+  // it can. It is the job of the caller to determine if the solution
+  // is good enough or not.
+  LinearSolver::Summary summary;
+  summary.num_iterations = 1;
+  summary.termination_type = LINEAR_SOLVER_SUCCESS;
+  summary.message = "Success.";
+
+  event_logger.AddEvent("TearDown");
+  return summary;
+}
+
+}   // namespace internal
+}   // namespace ceres
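A condensed, standalone sketch of the Eigen path above: stack diag(D) under A, zero-pad b, and solve the least squares problem with a Householder QR factorization. The function name and data are placeholders, not part of the patch.

    #include "Eigen/Dense"

    Eigen::VectorXd RegularizedQRSolve(const Eigen::MatrixXd& A,
                                       const Eigen::VectorXd& b,
                                       const Eigen::VectorXd& D) {
      const int num_rows = A.rows();
      const int num_cols = A.cols();
      // Augmented system [A; diag(D)] x = [b; 0].
      Eigen::MatrixXd A_aug(num_rows + num_cols, num_cols);
      A_aug.topRows(num_rows) = A;
      A_aug.bottomRows(num_cols) = D.asDiagonal();
      Eigen::VectorXd rhs = Eigen::VectorXd::Zero(num_rows + num_cols);
      rhs.head(num_rows) = b;
      return A_aug.householderQr().solve(rhs);
    }
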
diff --git a/internal/ceres/dense_qr_solver.h b/internal/ceres/dense_qr_solver.h
new file mode 100644
index 0000000..2ec124f
--- /dev/null
+++ b/internal/ceres/dense_qr_solver.h
@@ -0,0 +1,113 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Solve dense rectangular systems Ax = b using the QR factorization.
+#ifndef CERES_INTERNAL_DENSE_QR_SOLVER_H_
+#define CERES_INTERNAL_DENSE_QR_SOLVER_H_
+
+#include "ceres/linear_solver.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+class DenseSparseMatrix;
+
+// This class implements the LinearSolver interface for solving
+// rectangular/unsymmetric (well constrained) linear systems of the
+// form
+//
+//   Ax = b
+//
+// Since there does not usually exist a solution that satisfies these
+// equations, the solver instead solves the linear least squares
+// problem
+//
+//   min_x |Ax - b|^2
+//
+// The solution strategy is based on computing the QR decomposition of
+// A, i.e.
+//
+//   A = QR
+//
+// Where Q is an orthonormal matrix and R is an upper triangular
+// matrix. Then
+//
+//     Ax = b
+//    QRx = b
+//  Q'QRx = Q'b
+//     Rx = Q'b
+//      x = R^{-1} Q'b
+//
+// If the PerSolveOptions struct has a non-null array D, then the
+// augmented/regularized linear system
+//
+//   [    A    ]x = [b]
+//   [ diag(D) ]    [0]
+//
+// is solved.
+//
+// This class uses the dense QR factorization routines from the Eigen
+// library or LAPACK, depending on the options. This solver always
+// returns a solution; it is the user's responsibility to judge whether
+// the solution is good enough for their purposes.
+class DenseQRSolver: public DenseSparseMatrixSolver {
+ public:
+  explicit DenseQRSolver(const LinearSolver::Options& options);
+
+ private:
+  virtual LinearSolver::Summary SolveImpl(
+      DenseSparseMatrix* A,
+      const double* b,
+      const LinearSolver::PerSolveOptions& per_solve_options,
+      double* x);
+
+  LinearSolver::Summary SolveUsingEigen(
+      DenseSparseMatrix* A,
+      const double* b,
+      const LinearSolver::PerSolveOptions& per_solve_options,
+      double* x);
+
+  LinearSolver::Summary SolveUsingLAPACK(
+      DenseSparseMatrix* A,
+      const double* b,
+      const LinearSolver::PerSolveOptions& per_solve_options,
+      double* x);
+
+  const LinearSolver::Options options_;
+  ColMajorMatrix lhs_;
+  Vector rhs_;
+  Vector work_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_DENSE_QR_SOLVER_H_
diff --git a/internal/ceres/dense_sparse_matrix.cc b/internal/ceres/dense_sparse_matrix.cc
new file mode 100644
index 0000000..72e0836
--- /dev/null
+++ b/internal/ceres/dense_sparse_matrix.cc
@@ -0,0 +1,183 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include "ceres/dense_sparse_matrix.h"
+
+#include <algorithm>
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/port.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+DenseSparseMatrix::DenseSparseMatrix(int num_rows, int num_cols)
+    : has_diagonal_appended_(false),
+      has_diagonal_reserved_(false) {
+  m_.resize(num_rows, num_cols);
+  m_.setZero();
+}
+
+DenseSparseMatrix::DenseSparseMatrix(int num_rows,
+                                     int num_cols,
+                                     bool reserve_diagonal)
+    : has_diagonal_appended_(false),
+      has_diagonal_reserved_(reserve_diagonal) {
+  if (reserve_diagonal) {
+    // Allocate enough space for the diagonal.
+    m_.resize(num_rows + num_cols, num_cols);
+  } else {
+    m_.resize(num_rows, num_cols);
+  }
+  m_.setZero();
+}
+
+DenseSparseMatrix::DenseSparseMatrix(const TripletSparseMatrix& m)
+    : m_(Eigen::MatrixXd::Zero(m.num_rows(), m.num_cols())),
+      has_diagonal_appended_(false),
+      has_diagonal_reserved_(false) {
+  const double *values = m.values();
+  const int *rows = m.rows();
+  const int *cols = m.cols();
+  int num_nonzeros = m.num_nonzeros();
+
+  for (int i = 0; i < num_nonzeros; ++i) {
+    m_(rows[i], cols[i]) += values[i];
+  }
+}
+
+DenseSparseMatrix::DenseSparseMatrix(const ColMajorMatrix& m)
+    : m_(m),
+      has_diagonal_appended_(false),
+      has_diagonal_reserved_(false) {
+}
+
+void DenseSparseMatrix::SetZero() {
+  m_.setZero();
+}
+
+void DenseSparseMatrix::RightMultiply(const double* x, double* y) const {
+  VectorRef(y, num_rows()) += matrix() * ConstVectorRef(x, num_cols());
+}
+
+void DenseSparseMatrix::LeftMultiply(const double* x, double* y) const {
+  VectorRef(y, num_cols()) +=
+      matrix().transpose() * ConstVectorRef(x, num_rows());
+}
+
+void DenseSparseMatrix::SquaredColumnNorm(double* x) const {
+  VectorRef(x, num_cols()) = m_.colwise().squaredNorm();
+}
+
+void DenseSparseMatrix::ScaleColumns(const double* scale) {
+  m_ *= ConstVectorRef(scale, num_cols()).asDiagonal();
+}
+
+void DenseSparseMatrix::ToDenseMatrix(Matrix* dense_matrix) const {
+  *dense_matrix = m_.block(0, 0, num_rows(), num_cols());
+}
+
+void DenseSparseMatrix::AppendDiagonal(double *d) {
+  CHECK(!has_diagonal_appended_);
+  if (!has_diagonal_reserved_) {
+    ColMajorMatrix tmp = m_;
+    m_.resize(m_.rows() + m_.cols(), m_.cols());
+    m_.setZero();
+    m_.block(0, 0, tmp.rows(), tmp.cols()) = tmp;
+    has_diagonal_reserved_ = true;
+  }
+
+  m_.bottomLeftCorner(m_.cols(), m_.cols()) =
+      ConstVectorRef(d, m_.cols()).asDiagonal();
+  has_diagonal_appended_ = true;
+}
+
+void DenseSparseMatrix::RemoveDiagonal() {
+  CHECK(has_diagonal_appended_);
+  has_diagonal_appended_ = false;
+  // Leave the diagonal reserved.
+}
+
+int DenseSparseMatrix::num_rows() const {
+  if (has_diagonal_reserved_ && !has_diagonal_appended_) {
+    return m_.rows() - m_.cols();
+  }
+  return m_.rows();
+}
+
+int DenseSparseMatrix::num_cols() const {
+  return m_.cols();
+}
+
+int DenseSparseMatrix::num_nonzeros() const {
+  if (has_diagonal_reserved_ && !has_diagonal_appended_) {
+    return (m_.rows() - m_.cols()) * m_.cols();
+  }
+  return m_.rows() * m_.cols();
+}
+
+ConstColMajorMatrixRef DenseSparseMatrix::matrix() const {
+  return ConstColMajorMatrixRef(
+      m_.data(),
+      ((has_diagonal_reserved_ && !has_diagonal_appended_)
+       ? m_.rows() - m_.cols()
+       : m_.rows()),
+      m_.cols(),
+      Eigen::Stride<Eigen::Dynamic, 1>(m_.rows(), 1));
+}
+
+ColMajorMatrixRef DenseSparseMatrix::mutable_matrix() {
+  return ColMajorMatrixRef(
+      m_.data(),
+      ((has_diagonal_reserved_ && !has_diagonal_appended_)
+       ? m_.rows() - m_.cols()
+       : m_.rows()),
+      m_.cols(),
+      Eigen::Stride<Eigen::Dynamic, 1>(m_.rows(), 1));
+}
+
+
+void DenseSparseMatrix::ToTextFile(FILE* file) const {
+  CHECK(file != nullptr);
+  const int active_rows =
+      (has_diagonal_reserved_ && !has_diagonal_appended_)
+      ? (m_.rows() - m_.cols())
+      : m_.rows();
+
+  for (int r = 0; r < active_rows; ++r) {
+    for (int c = 0; c < m_.cols(); ++c) {
+      fprintf(file,  "% 10d % 10d %17f\n", r, c, m_(r, c));
+    }
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
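The matrix() and mutable_matrix() accessors above hide the reserved diagonal rows by mapping only the top num_rows() rows of the taller storage while keeping the full storage height as the outer stride; ConstColMajorMatrixRef is presumably such a strided Eigen Map. A standalone sketch of that mapping with placeholder sizes:

    #include "Eigen/Core"

    void StridedViewSketch() {
      // 5x3 matrix with 3 extra rows reserved for an appended diagonal.
      Eigen::MatrixXd storage = Eigen::MatrixXd::Zero(5 + 3, 3);
      using DynamicStride = Eigen::Stride<Eigen::Dynamic, 1>;
      // View only the top 5 rows of each column; the distance between the
      // starts of consecutive columns is still the full storage height.
      Eigen::Map<const Eigen::MatrixXd, 0, DynamicStride> view(
          storage.data(), 5, 3, DynamicStride(storage.rows(), 1));
      (void)view;
    }
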
diff --git a/internal/ceres/dense_sparse_matrix.h b/internal/ceres/dense_sparse_matrix.h
new file mode 100644
index 0000000..e5a5483
--- /dev/null
+++ b/internal/ceres/dense_sparse_matrix.h
@@ -0,0 +1,107 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// A dense matrix implemented under the SparseMatrix interface.
+
+#ifndef CERES_INTERNAL_DENSE_SPARSE_MATRIX_H_
+#define CERES_INTERNAL_DENSE_SPARSE_MATRIX_H_
+
+#include "ceres/internal/eigen.h"
+#include "ceres/sparse_matrix.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+class TripletSparseMatrix;
+
+class DenseSparseMatrix : public SparseMatrix {
+ public:
+  // Build a matrix with the same content as the TripletSparseMatrix
+  // m. This assumes that m does not have any repeated entries.
+  explicit DenseSparseMatrix(const TripletSparseMatrix& m);
+  explicit DenseSparseMatrix(const ColMajorMatrix& m);
+
+  DenseSparseMatrix(int num_rows, int num_cols);
+  DenseSparseMatrix(int num_rows, int num_cols, bool reserve_diagonal);
+
+  virtual ~DenseSparseMatrix() {}
+
+  // SparseMatrix interface.
+  virtual void SetZero();
+  virtual void RightMultiply(const double* x, double* y) const;
+  virtual void LeftMultiply(const double* x, double* y) const;
+  virtual void SquaredColumnNorm(double* x) const;
+  virtual void ScaleColumns(const double* scale);
+  virtual void ToDenseMatrix(Matrix* dense_matrix) const;
+  virtual void ToTextFile(FILE* file) const;
+  virtual int num_rows() const;
+  virtual int num_cols() const;
+  virtual int num_nonzeros() const;
+  virtual const double* values() const { return m_.data(); }
+  virtual double* mutable_values() { return m_.data(); }
+
+  ConstColMajorMatrixRef matrix() const;
+  ColMajorMatrixRef mutable_matrix();
+
+  // Only one diagonal can be appended at a time. The diagonal is
+  // appended as a new set of rows, e.g.
+  //
+  // Original matrix:
+  //
+  //   x x x
+  //   x x x
+  //   x x x
+  //
+  // After appending the diagonal (1, 2, 3):
+  //
+  //   x x x
+  //   x x x
+  //   x x x
+  //   1 0 0
+  //   0 2 0
+  //   0 0 3
+  //
+  // Calling RemoveDiagonal removes the block. It is a fatal error to append a
+  // diagonal to a matrix that already has an appended diagonal, and it is also
+  // a fatal error to remove a diagonal from a matrix that has none.
+  void AppendDiagonal(double *d);
+  void RemoveDiagonal();
+
+ private:
+  ColMajorMatrix m_;
+  bool has_diagonal_appended_;
+  bool has_diagonal_reserved_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_DENSE_SPARSE_MATRIX_H_
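A short usage sketch of the append/remove contract documented above; the sizes and diagonal values are placeholders.

    void AppendDiagonalSketch() {
      DenseSparseMatrix jacobian(5, 3, /* reserve_diagonal = */ true);
      double diagonal[3] = {1.0, 2.0, 3.0};
      jacobian.AppendDiagonal(diagonal);  // num_rows() now reports 5 + 3.
      // ... use the augmented matrix, e.g. in a regularized solve ...
      jacobian.RemoveDiagonal();          // Back to 5 rows; storage stays reserved.
    }
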
diff --git a/internal/ceres/dense_sparse_matrix_test.cc b/internal/ceres/dense_sparse_matrix_test.cc
new file mode 100644
index 0000000..4d52e81
--- /dev/null
+++ b/internal/ceres/dense_sparse_matrix_test.cc
@@ -0,0 +1,169 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// TODO(keir): Implement a generic "compare sparse matrix implementations" test
+// suite that can compare all the implementations. Then this file would shrink
+// in size.
+
+#include "ceres/dense_sparse_matrix.h"
+
+#include <memory>
+#include "ceres/casts.h"
+#include "ceres/linear_least_squares_problems.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/internal/eigen.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+void CompareMatrices(const SparseMatrix* a, const SparseMatrix* b) {
+  EXPECT_EQ(a->num_rows(), b->num_rows());
+  EXPECT_EQ(a->num_cols(), b->num_cols());
+
+  int num_rows = a->num_rows();
+  int num_cols = a->num_cols();
+
+  for (int i = 0; i < num_cols; ++i) {
+    Vector x = Vector::Zero(num_cols);
+    x(i) = 1.0;
+
+    Vector y_a = Vector::Zero(num_rows);
+    Vector y_b = Vector::Zero(num_rows);
+
+    a->RightMultiply(x.data(), y_a.data());
+    b->RightMultiply(x.data(), y_b.data());
+
+    EXPECT_EQ((y_a - y_b).norm(), 0);
+  }
+}
+
+class DenseSparseMatrixTest : public ::testing::Test {
+ protected:
+  virtual void SetUp() {
+    std::unique_ptr<LinearLeastSquaresProblem> problem(
+        CreateLinearLeastSquaresProblemFromId(1));
+
+    CHECK(problem != nullptr);
+
+    tsm.reset(down_cast<TripletSparseMatrix*>(problem->A.release()));
+    dsm.reset(new DenseSparseMatrix(*tsm));
+
+    num_rows = tsm->num_rows();
+    num_cols = tsm->num_cols();
+  }
+
+  int num_rows;
+  int num_cols;
+
+  std::unique_ptr<TripletSparseMatrix> tsm;
+  std::unique_ptr<DenseSparseMatrix> dsm;
+};
+
+TEST_F(DenseSparseMatrixTest, RightMultiply) {
+  CompareMatrices(tsm.get(), dsm.get());
+
+  // Try with a not entirely zero vector to verify column interactions, which
+  // could be masked by a subtle bug when using the elementary vectors.
+  Vector a(num_cols);
+  for (int i = 0; i < num_cols; i++) {
+    a(i) = i;
+  }
+  Vector b1 = Vector::Zero(num_rows);
+  Vector b2 = Vector::Zero(num_rows);
+
+  tsm->RightMultiply(a.data(), b1.data());
+  dsm->RightMultiply(a.data(), b2.data());
+
+  EXPECT_EQ((b1 - b2).norm(), 0);
+}
+
+TEST_F(DenseSparseMatrixTest, LeftMultiply) {
+  for (int i = 0; i < num_rows; ++i) {
+    Vector a = Vector::Zero(num_rows);
+    a(i) = 1.0;
+
+    Vector b1 = Vector::Zero(num_cols);
+    Vector b2 = Vector::Zero(num_cols);
+
+    tsm->LeftMultiply(a.data(), b1.data());
+    dsm->LeftMultiply(a.data(), b2.data());
+
+    EXPECT_EQ((b1 - b2).norm(), 0);
+  }
+
+  // Try with a not entirely zero vector to verify column interactions, which
+  // could be masked by a subtle bug when using the elementary vectors.
+  Vector a(num_rows);
+  for (int i = 0; i < num_rows; i++) {
+    a(i) = i;
+  }
+  Vector b1 = Vector::Zero(num_cols);
+  Vector b2 = Vector::Zero(num_cols);
+
+  tsm->LeftMultiply(a.data(), b1.data());
+  dsm->LeftMultiply(a.data(), b2.data());
+
+  EXPECT_EQ((b1 - b2).norm(), 0);
+}
+
+TEST_F(DenseSparseMatrixTest, ColumnNorm) {
+  Vector b1 = Vector::Zero(num_cols);
+  Vector b2 = Vector::Zero(num_cols);
+
+  tsm->SquaredColumnNorm(b1.data());
+  dsm->SquaredColumnNorm(b2.data());
+
+  EXPECT_EQ((b1 - b2).norm(), 0);
+}
+
+TEST_F(DenseSparseMatrixTest, Scale) {
+  Vector scale(num_cols);
+  for (int i = 0; i < num_cols; ++i) {
+    scale(i) = i + 1;
+  }
+  tsm->ScaleColumns(scale.data());
+  dsm->ScaleColumns(scale.data());
+  CompareMatrices(tsm.get(), dsm.get());
+}
+
+TEST_F(DenseSparseMatrixTest, ToDenseMatrix) {
+  Matrix tsm_dense;
+  Matrix dsm_dense;
+
+  tsm->ToDenseMatrix(&tsm_dense);
+  dsm->ToDenseMatrix(&dsm_dense);
+
+  EXPECT_EQ((tsm_dense - dsm_dense).norm(), 0.0);
+}
+
+}  // namespace internal
+}  // namespace ceres
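CompareMatrices above checks two implementations of the same matrix by probing both with the canonical basis vectors, so a mismatch in any column shows up as a nonzero difference. A plain-Eigen sketch of the same probing idea, with the two operators standing in as dense matrices:

    #include "Eigen/Core"

    bool SameOperator(const Eigen::MatrixXd& A, const Eigen::MatrixXd& B) {
      if (A.rows() != B.rows() || A.cols() != B.cols()) return false;
      for (int i = 0; i < A.cols(); ++i) {
        const Eigen::VectorXd e = Eigen::VectorXd::Unit(A.cols(), i);
        if ((A * e - B * e).norm() != 0.0) return false;
      }
      return true;
    }
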
diff --git a/internal/ceres/detect_structure.cc b/internal/ceres/detect_structure.cc
new file mode 100644
index 0000000..959a0ee
--- /dev/null
+++ b/internal/ceres/detect_structure.cc
@@ -0,0 +1,120 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/detect_structure.h"
+#include "ceres/internal/eigen.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+void DetectStructure(const CompressedRowBlockStructure& bs,
+                     const int num_eliminate_blocks,
+                     int* row_block_size,
+                     int* e_block_size,
+                     int* f_block_size) {
+  const int num_row_blocks = bs.rows.size();
+  *row_block_size = 0;
+  *e_block_size = 0;
+  *f_block_size = 0;
+
+  // Iterate over row blocks of the matrix, checking if row_block,
+  // e_block or f_block sizes remain constant.
+  for (int r = 0; r < num_row_blocks; ++r) {
+    const CompressedRow& row = bs.rows[r];
+    // We do not care about the sizes of the blocks in rows which do
+    // not contain e_blocks.
+    if (row.cells.front().block_id >= num_eliminate_blocks) {
+      break;
+    }
+
+    // Detect fixed or dynamic row block size.
+    if (*row_block_size == 0) {
+      *row_block_size = row.block.size;
+    } else if (*row_block_size != Eigen::Dynamic &&
+               *row_block_size != row.block.size) {
+      VLOG(2) << "Dynamic row block size because the block size changed from "
+              << *row_block_size << " to "
+              << row.block.size;
+      *row_block_size = Eigen::Dynamic;
+    }
+
+    // Detect fixed or dynamic e-block size.
+    const int e_block_id = row.cells.front().block_id;
+    if (*e_block_size == 0) {
+      *e_block_size = bs.cols[e_block_id].size;
+    } else if (*e_block_size != Eigen::Dynamic &&
+               *e_block_size != bs.cols[e_block_id].size) {
+      VLOG(2) << "Dynamic e block size because the block size changed from "
+              << *e_block_size << " to "
+              << bs.cols[e_block_id].size;
+      *e_block_size = Eigen::Dynamic;
+    }
+
+    // Detect fixed or dynamic f-block size. We are only interested in
+    // rows with e-blocks, and the e-block is always the first cell in a
+    // row, so only rows with more than one cell are of interest.
+    if (row.cells.size() > 1) {
+      if (*f_block_size == 0) {
+        const int f_block_id = row.cells[1].block_id;
+        *f_block_size = bs.cols[f_block_id].size;
+      }
+
+      for (int c = 1;
+           (c < row.cells.size()) && (*f_block_size != Eigen::Dynamic);
+           ++c) {
+        const int f_block_id = row.cells[c].block_id;
+        if (*f_block_size != bs.cols[f_block_id].size) {
+          VLOG(2) << "Dynamic f block size because the block size "
+                  << "changed from " << *f_block_size << " to "
+                  << bs.cols[f_block_id].size;
+          *f_block_size = Eigen::Dynamic;
+        }
+      }
+    }
+
+    const bool is_everything_dynamic = (*row_block_size == Eigen::Dynamic &&
+                                        *e_block_size == Eigen::Dynamic &&
+                                        *f_block_size == Eigen::Dynamic);
+    if (is_everything_dynamic) {
+      break;
+    }
+  }
+
+  CHECK_NE(*row_block_size, 0) << "No rows found";
+  CHECK_NE(*e_block_size, 0) << "No e type blocks found";
+  VLOG(1) << "Schur complement static structure <"
+          << *row_block_size << ","
+          << *e_block_size << ","
+          << *f_block_size << ">.";
+}
+
+}  // namespace internal
+}  // namespace ceres
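A usage sketch of the function above; the block structure bs and num_eliminate_blocks are assumed to come from the problem being set up. The point is that Eigen::Dynamic in any of the three outputs means the corresponding size is not a compile-time constant:

    int row_block_size = 0;
    int e_block_size = 0;
    int f_block_size = 0;
    DetectStructure(bs, num_eliminate_blocks,
                    &row_block_size, &e_block_size, &f_block_size);
    if (row_block_size != Eigen::Dynamic &&
        e_block_size != Eigen::Dynamic &&
        f_block_size != Eigen::Dynamic) {
      // All three sizes are fixed; a statically sized SchurEliminator
      // specialization can be selected (see schur_eliminator.h).
    } else {
      // At least one size varies; fall back to dynamically sized operations.
    }
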
diff --git a/internal/ceres/detect_structure.h b/internal/ceres/detect_structure.h
new file mode 100644
index 0000000..602581c
--- /dev/null
+++ b/internal/ceres/detect_structure.h
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_DETECT_STRUCTURE_H_
+#define CERES_INTERNAL_DETECT_STRUCTURE_H_
+
+#include "ceres/block_structure.h"
+
+namespace ceres {
+namespace internal {
+
+// Detect static blocks in the problem sparsity. For rows containing
+// e_blocks, we are interested in detecting if the size of the row
+// blocks, e_blocks and the f_blocks remain constant. If they do, then
+// we can use template specialization to improve the performance of
+// the block level linear algebra operations used by the
+// SchurEliminator.
+//
+// If a block size is not constant, we return Eigen::Dynamic as the
+// value. This just means that the eliminator uses dynamically sized
+// linear algebra operations rather than static operations whose size
+// is known at compile time.
+//
+// For more details about e_blocks and f_blocks, see
+// schur_eliminator.h. This information is used to initialize an
+// appropriate template specialization of SchurEliminator.
+//
+// Note: The structure of rows without any e-blocks has no effect on
+// the values returned by this function. It is entirely possible that
+// the f_block_size and row_block_size are not constant in such rows.
+void DetectStructure(const CompressedRowBlockStructure& bs,
+                     const int num_eliminate_blocks,
+                     int* row_block_size,
+                     int* e_block_size,
+                     int* f_block_size);
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_DETECT_STRUCTURE_H_
diff --git a/internal/ceres/detect_structure_test.cc b/internal/ceres/detect_structure_test.cc
new file mode 100644
index 0000000..a701a19
--- /dev/null
+++ b/internal/ceres/detect_structure_test.cc
@@ -0,0 +1,294 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "Eigen/Core"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+#include "ceres/block_structure.h"
+#include "ceres/detect_structure.h"
+
+namespace ceres {
+namespace internal {
+
+TEST(DetectStructure, EverythingStatic) {
+  const int expected_row_block_size = 2;
+  const int expected_e_block_size = 3;
+  const int expected_f_block_size = 4;
+
+  CompressedRowBlockStructure bs;
+
+  bs.cols.push_back(Block());
+  bs.cols.back().size = 3;
+  bs.cols.back().position = 0;
+
+  bs.cols.push_back(Block());
+  bs.cols.back().size = 4;
+  bs.cols.back().position = 3;
+
+  bs.cols.push_back(Block());
+  bs.cols.back().size = 4;
+  bs.cols.back().position = 7;
+
+  {
+    bs.rows.push_back(CompressedRow());
+    CompressedRow& row = bs.rows.back();
+    row.block.size = 2;
+    row.block.position = 0;
+    row.cells.push_back(Cell(0, 0));
+    row.cells.push_back(Cell(1, 0));
+  }
+
+  {
+    bs.rows.push_back(CompressedRow());
+    CompressedRow& row = bs.rows.back();
+    row.block.size = 2;
+    row.block.position = 2;
+    row.cells.push_back(Cell(0, 0));
+    row.cells.push_back(Cell(2, 0));
+  }
+
+  int row_block_size = 0;
+  int e_block_size = 0;
+  int f_block_size = 0;
+  const int num_eliminate_blocks = 1;
+  DetectStructure(bs,
+                  num_eliminate_blocks,
+                  &row_block_size,
+                  &e_block_size,
+                  &f_block_size);
+
+  EXPECT_EQ(row_block_size, expected_row_block_size);
+  EXPECT_EQ(e_block_size, expected_e_block_size);
+  EXPECT_EQ(f_block_size, expected_f_block_size);
+}
+
+TEST(DetectStructure, DynamicRow) {
+  const int expected_row_block_size = Eigen::Dynamic;
+  const int expected_e_block_size = 3;
+  const int expected_f_block_size = 4;
+
+  CompressedRowBlockStructure bs;
+
+  bs.cols.push_back(Block());
+  bs.cols.back().size = 3;
+  bs.cols.back().position = 0;
+
+  bs.cols.push_back(Block());
+  bs.cols.back().size = 4;
+  bs.cols.back().position = 3;
+
+  bs.cols.push_back(Block());
+  bs.cols.back().size = 4;
+  bs.cols.back().position = 7;
+
+  {
+    bs.rows.push_back(CompressedRow());
+    CompressedRow& row = bs.rows.back();
+    row.block.size = 2;
+    row.block.position = 0;
+    row.cells.push_back(Cell(0, 0));
+    row.cells.push_back(Cell(1, 0));
+  }
+
+  {
+    bs.rows.push_back(CompressedRow());
+    CompressedRow& row = bs.rows.back();
+    row.block.size = 1;
+    row.block.position = 2;
+    row.cells.push_back(Cell(0, 0));
+    row.cells.push_back(Cell(2, 0));
+  }
+
+  int row_block_size = 0;
+  int e_block_size = 0;
+  int f_block_size = 0;
+  const int num_eliminate_blocks = 1;
+  DetectStructure(bs,
+                  num_eliminate_blocks,
+                  &row_block_size,
+                  &e_block_size,
+                  &f_block_size);
+
+  EXPECT_EQ(row_block_size, expected_row_block_size);
+  EXPECT_EQ(e_block_size, expected_e_block_size);
+  EXPECT_EQ(f_block_size, expected_f_block_size);
+}
+
+TEST(DetectStructure, DynamicFBlockDifferentRows) {
+  const int expected_row_block_size = 2;
+  const int expected_e_block_size = 3;
+  const int expected_f_block_size = Eigen::Dynamic;
+
+  CompressedRowBlockStructure bs;
+
+  bs.cols.push_back(Block());
+  bs.cols.back().size = 3;
+  bs.cols.back().position = 0;
+
+  bs.cols.push_back(Block());
+  bs.cols.back().size = 4;
+  bs.cols.back().position = 3;
+
+  bs.cols.push_back(Block());
+  bs.cols.back().size = 3;
+  bs.cols.back().position = 7;
+
+  {
+    bs.rows.push_back(CompressedRow());
+    CompressedRow& row = bs.rows.back();
+    row.block.size = 2;
+    row.block.position = 0;
+    row.cells.push_back(Cell(0, 0));
+    row.cells.push_back(Cell(1, 0));
+  }
+
+  {
+    bs.rows.push_back(CompressedRow());
+    CompressedRow& row = bs.rows.back();
+    row.block.size = 2;
+    row.block.position = 2;
+    row.cells.push_back(Cell(0, 0));
+    row.cells.push_back(Cell(2, 0));
+  }
+
+  int row_block_size = 0;
+  int e_block_size = 0;
+  int f_block_size = 0;
+  const int num_eliminate_blocks = 1;
+  DetectStructure(bs,
+                  num_eliminate_blocks,
+                  &row_block_size,
+                  &e_block_size,
+                  &f_block_size);
+
+  EXPECT_EQ(row_block_size, expected_row_block_size);
+  EXPECT_EQ(e_block_size, expected_e_block_size);
+  EXPECT_EQ(f_block_size, expected_f_block_size);
+}
+
+TEST(DetectStructure, DynamicEBlock) {
+  const int expected_row_block_size = 2;
+  const int expected_e_block_size = Eigen::Dynamic;
+  const int expected_f_block_size = 3;
+
+  CompressedRowBlockStructure bs;
+
+  bs.cols.push_back(Block());
+  bs.cols.back().size = 3;
+  bs.cols.back().position = 0;
+
+  bs.cols.push_back(Block());
+  bs.cols.back().size = 4;
+  bs.cols.back().position = 3;
+
+  bs.cols.push_back(Block());
+  bs.cols.back().size = 3;
+  bs.cols.back().position = 7;
+
+  {
+    bs.rows.push_back(CompressedRow());
+    CompressedRow& row = bs.rows.back();
+    row.block.size = 2;
+    row.block.position = 0;
+    row.cells.push_back(Cell(0, 0));
+    row.cells.push_back(Cell(2, 0));
+  }
+
+  {
+    bs.rows.push_back(CompressedRow());
+    CompressedRow& row = bs.rows.back();
+    row.block.size = 2;
+    row.block.position = 2;
+    row.cells.push_back(Cell(1, 0));
+    row.cells.push_back(Cell(2, 0));
+  }
+
+  int row_block_size = 0;
+  int e_block_size = 0;
+  int f_block_size = 0;
+  const int num_eliminate_blocks = 2;
+  DetectStructure(bs,
+                  num_eliminate_blocks,
+                  &row_block_size,
+                  &e_block_size,
+                  &f_block_size);
+
+  EXPECT_EQ(row_block_size, expected_row_block_size);
+  EXPECT_EQ(e_block_size, expected_e_block_size);
+  EXPECT_EQ(f_block_size, expected_f_block_size);
+}
+
+TEST(DetectStructure, DynamicFBlockSameRow) {
+  const int expected_row_block_size = 2;
+  const int expected_e_block_size = 3;
+  const int expected_f_block_size = Eigen::Dynamic;
+
+  CompressedRowBlockStructure bs;
+
+  bs.cols.push_back(Block());
+  bs.cols.back().size = 3;
+  bs.cols.back().position = 0;
+
+  bs.cols.push_back(Block());
+  bs.cols.back().size = 4;
+  bs.cols.back().position = 3;
+
+  bs.cols.push_back(Block());
+  bs.cols.back().size = 3;
+  bs.cols.back().position = 7;
+
+  {
+    bs.rows.push_back(CompressedRow());
+    CompressedRow& row = bs.rows.back();
+    row.block.size = 2;
+    row.block.position = 0;
+    row.cells.push_back(Cell(0, 0));
+    row.cells.push_back(Cell(1, 0));
+    row.cells.push_back(Cell(2, 0));
+  }
+
+  int row_block_size = 0;
+  int e_block_size = 0;
+  int f_block_size = 0;
+  const int num_eliminate_blocks = 1;
+  DetectStructure(bs,
+                  num_eliminate_blocks,
+                  &row_block_size,
+                  &e_block_size,
+                  &f_block_size);
+
+  EXPECT_EQ(row_block_size, expected_row_block_size);
+  EXPECT_EQ(e_block_size, expected_e_block_size);
+  EXPECT_EQ(f_block_size, expected_f_block_size);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/dogleg_strategy.cc b/internal/ceres/dogleg_strategy.cc
new file mode 100644
index 0000000..ecc6b88
--- /dev/null
+++ b/internal/ceres/dogleg_strategy.cc
@@ -0,0 +1,720 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/dogleg_strategy.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include "Eigen/Dense"
+#include "ceres/array_utils.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/linear_least_squares_problems.h"
+#include "ceres/linear_solver.h"
+#include "ceres/polynomial.h"
+#include "ceres/sparse_matrix.h"
+#include "ceres/trust_region_strategy.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+namespace {
+const double kMaxMu = 1.0;
+const double kMinMu = 1e-8;
+}
+
+DoglegStrategy::DoglegStrategy(const TrustRegionStrategy::Options& options)
+    : linear_solver_(options.linear_solver),
+      radius_(options.initial_radius),
+      max_radius_(options.max_radius),
+      min_diagonal_(options.min_lm_diagonal),
+      max_diagonal_(options.max_lm_diagonal),
+      mu_(kMinMu),
+      min_mu_(kMinMu),
+      max_mu_(kMaxMu),
+      mu_increase_factor_(10.0),
+      increase_threshold_(0.75),
+      decrease_threshold_(0.25),
+      dogleg_step_norm_(0.0),
+      reuse_(false),
+      dogleg_type_(options.dogleg_type) {
+  CHECK(linear_solver_ != nullptr);
+  CHECK_GT(min_diagonal_, 0.0);
+  CHECK_LE(min_diagonal_, max_diagonal_);
+  CHECK_GT(max_radius_, 0.0);
+}
+
+// If the reuse_ flag is not set, then the Cauchy point (scaled
+// gradient) and the new Gauss-Newton step are computed from
+// scratch. The Dogleg step is then computed as an interpolation of these
+// two vectors.
+TrustRegionStrategy::Summary DoglegStrategy::ComputeStep(
+    const TrustRegionStrategy::PerSolveOptions& per_solve_options,
+    SparseMatrix* jacobian,
+    const double* residuals,
+    double* step) {
+  CHECK(jacobian != nullptr);
+  CHECK(residuals != nullptr);
+  CHECK(step != nullptr);
+
+  const int n = jacobian->num_cols();
+  if (reuse_) {
+    // Gauss-Newton and gradient vectors are always available; only a
+    // new interpolant needs to be computed. For the subspace case,
+    // the subspace and the two-dimensional model are also still valid.
+    switch (dogleg_type_) {
+      case TRADITIONAL_DOGLEG:
+        ComputeTraditionalDoglegStep(step);
+        break;
+
+      case SUBSPACE_DOGLEG:
+        ComputeSubspaceDoglegStep(step);
+        break;
+    }
+    TrustRegionStrategy::Summary summary;
+    summary.num_iterations = 0;
+    summary.termination_type = LINEAR_SOLVER_SUCCESS;
+    return summary;
+  }
+
+  reuse_ = true;
+  // Check that we have the storage needed to hold the various
+  // temporary vectors.
+  if (diagonal_.rows() != n) {
+    diagonal_.resize(n, 1);
+    gradient_.resize(n, 1);
+    gauss_newton_step_.resize(n, 1);
+  }
+
+  // Vector used to form the diagonal matrix that is used to
+  // regularize the Gauss-Newton solve and that defines the
+  // elliptical trust region
+  //
+  //   || D * step || <= radius_ .
+  //
+  jacobian->SquaredColumnNorm(diagonal_.data());
+  for (int i = 0; i < n; ++i) {
+    diagonal_[i] = std::min(std::max(diagonal_[i], min_diagonal_),
+                            max_diagonal_);
+  }
+  diagonal_ = diagonal_.array().sqrt();
+
+  ComputeGradient(jacobian, residuals);
+  ComputeCauchyPoint(jacobian);
+
+  LinearSolver::Summary linear_solver_summary =
+      ComputeGaussNewtonStep(per_solve_options, jacobian, residuals);
+
+  TrustRegionStrategy::Summary summary;
+  summary.residual_norm = linear_solver_summary.residual_norm;
+  summary.num_iterations = linear_solver_summary.num_iterations;
+  summary.termination_type = linear_solver_summary.termination_type;
+
+  if (linear_solver_summary.termination_type == LINEAR_SOLVER_FATAL_ERROR) {
+    return summary;
+  }
+
+  if (linear_solver_summary.termination_type != LINEAR_SOLVER_FAILURE) {
+    switch (dogleg_type_) {
+      // Interpolate the Cauchy point and the Gauss-Newton step.
+      case TRADITIONAL_DOGLEG:
+        ComputeTraditionalDoglegStep(step);
+        break;
+
+      // Find the minimum in the subspace defined by the
+      // Cauchy point and the (Gauss-)Newton step.
+      case SUBSPACE_DOGLEG:
+        if (!ComputeSubspaceModel(jacobian)) {
+          summary.termination_type = LINEAR_SOLVER_FAILURE;
+          break;
+        }
+        ComputeSubspaceDoglegStep(step);
+        break;
+    }
+  }
+
+  return summary;
+}
+
+// The trust region is assumed to be elliptical, with the diagonal
+// scaling matrix D given by the entries of diagonal_ (the square
+// roots of the clamped squared column norms of the Jacobian).
+// It is implemented by substituting step' = D * step.
+// The trust region for step' is spherical.
+// The gradient, the Gauss-Newton step, the Cauchy point,
+// and all calculations involving the Jacobian have to
+// be adjusted accordingly.
+void DoglegStrategy::ComputeGradient(
+    SparseMatrix* jacobian,
+    const double* residuals) {
+  gradient_.setZero();
+  jacobian->LeftMultiply(residuals, gradient_.data());
+  gradient_.array() /= diagonal_.array();
+}
+
+// The Cauchy point is the global minimizer of the quadratic model
+// along the one-dimensional subspace spanned by the gradient.
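+//
+// For reference: writing g for the (scaled) gradient_ and J~ = J D^-1
+// for the scaled Jacobian, the quadratic model along the ray -a * g is
+//
+//   m(-a g) - m(0) = -a ||g||^2 + 0.5 a^2 ||J~ g||^2 ,
+//
+// which is minimized at a = ||g||^2 / ||J~ g||^2. This is the value
+// alpha_ computed below from gradient_.squaredNorm() and
+// Jg.squaredNorm().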
+void DoglegStrategy::ComputeCauchyPoint(SparseMatrix* jacobian) {
+  // alpha * -gradient is the Cauchy point.
+  Vector Jg(jacobian->num_rows());
+  Jg.setZero();
+  // The Jacobian is scaled implicitly by computing J * (D^-1 * (D^-1 * g))
+  // instead of (J * D^-1) * (D^-1 * g).
+  Vector scaled_gradient =
+      (gradient_.array() / diagonal_.array()).matrix();
+  jacobian->RightMultiply(scaled_gradient.data(), Jg.data());
+  alpha_ = gradient_.squaredNorm() / Jg.squaredNorm();
+}
+
+// The dogleg step is defined as the intersection of the trust region
+// boundary with the piecewise linear path from the origin to the Cauchy
+// point and then from there to the Gauss-Newton point (global minimizer
+// of the model function). The Gauss-Newton point is taken if it lies
+// within the trust region.
+void DoglegStrategy::ComputeTraditionalDoglegStep(double* dogleg) {
+  VectorRef dogleg_step(dogleg, gradient_.rows());
+
+  // Case 1. The Gauss-Newton step lies inside the trust region, and
+  // is therefore the optimal solution to the trust-region problem.
+  const double gradient_norm = gradient_.norm();
+  const double gauss_newton_norm = gauss_newton_step_.norm();
+  if (gauss_newton_norm <= radius_) {
+    dogleg_step = gauss_newton_step_;
+    dogleg_step_norm_ = gauss_newton_norm;
+    dogleg_step.array() /= diagonal_.array();
+    VLOG(3) << "GaussNewton step size: " << dogleg_step_norm_
+            << " radius: " << radius_;
+    return;
+  }
+
+  // Case 2. The Cauchy point and the Gauss-Newton steps lie outside
+  // the trust region. Rescale the Cauchy point to the trust region
+  // and return.
+  if (gradient_norm * alpha_ >= radius_) {
+    dogleg_step = -(radius_ / gradient_norm) * gradient_;
+    dogleg_step_norm_ = radius_;
+    dogleg_step.array() /= diagonal_.array();
+    VLOG(3) << "Cauchy step size: " << dogleg_step_norm_
+            << " radius: " << radius_;
+    return;
+  }
+
+  // Case 3. The Cauchy point is inside the trust region and the
+  // Gauss-Newton step is outside. Compute the line joining the two
+  // points and the point on it which intersects the trust region
+  // boundary.
+
+  // a = alpha * -gradient
+  // b = gauss_newton_step
+  const double b_dot_a = -alpha_ * gradient_.dot(gauss_newton_step_);
+  const double a_squared_norm = pow(alpha_ * gradient_norm, 2.0);
+  const double b_minus_a_squared_norm =
+      a_squared_norm - 2 * b_dot_a + pow(gauss_newton_norm, 2);
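+
+  // beta below is the positive root of
+  //
+  //   || a + beta (b - a) ||^2 = radius_^2,
+  //
+  // i.e. of the quadratic
+  //
+  //   beta^2 |b - a|^2 + 2 beta a'(b - a) + (|a|^2 - radius_^2) = 0,
+  //
+  // written in a numerically stable form: the branch on the sign of c
+  // avoids cancellation between d and c.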
+
+  // c = a' (b - a)
+  //   = alpha * -gradient' gauss_newton_step - alpha^2 |gradient|^2
+  const double c = b_dot_a - a_squared_norm;
+  const double d = sqrt(c * c + b_minus_a_squared_norm *
+                        (pow(radius_, 2.0) - a_squared_norm));
+
+  double beta =
+      (c <= 0)
+      ? (d - c) / b_minus_a_squared_norm
+      : (radius_ * radius_ - a_squared_norm) / (d + c);
+  dogleg_step = (-alpha_ * (1.0 - beta)) * gradient_
+      + beta * gauss_newton_step_;
+  dogleg_step_norm_ = dogleg_step.norm();
+  dogleg_step.array() /= diagonal_.array();
+  VLOG(3) << "Dogleg step size: " << dogleg_step_norm_
+          << " radius: " << radius_;
+}
+
+// The subspace method finds the minimum of the two-dimensional problem
+//
+//   min. 1/2 x' B' H B x + g' B x
+//   s.t. || B x ||^2 <= r^2
+//
+// where r is the trust region radius and B is the matrix with unit columns
+// spanning the subspace defined by the steepest descent and Newton direction.
+// This subspace by definition includes the Gauss-Newton point, which is
+// therefore taken if it lies within the trust region.
+void DoglegStrategy::ComputeSubspaceDoglegStep(double* dogleg) {
+  VectorRef dogleg_step(dogleg, gradient_.rows());
+
+  // The Gauss-Newton point is inside the trust region if |GN| <= radius_.
+  // This test is valid even though radius_ is a length in the two-dimensional
+  // subspace while gauss_newton_step_ is expressed in the (scaled)
+  // higher dimensional original space. This is because
+  //
+  //   1. gauss_newton_step_ by definition lies in the subspace, and
+  //   2. the subspace basis is orthonormal.
+  //
+  // As a consequence, the norm of the gauss_newton_step_ in the subspace is
+  // the same as its norm in the original space.
+  const double gauss_newton_norm = gauss_newton_step_.norm();
+  if (gauss_newton_norm <= radius_) {
+    dogleg_step = gauss_newton_step_;
+    dogleg_step_norm_ = gauss_newton_norm;
+    dogleg_step.array() /= diagonal_.array();
+    VLOG(3) << "GaussNewton step size: " << dogleg_step_norm_
+            << " radius: " << radius_;
+    return;
+  }
+
+  // The optimum lies on the boundary of the trust region. The above problem
+  // therefore becomes
+  //
+  //   min. 1/2 x^T B^T H B x + g^T B x
+  //   s.t. || B x ||^2 = r^2
+  //
+  // Notice the equality in the constraint.
+  //
+  // This can be solved by forming the Lagrangian, solving for x(y), where
+  // y is the Lagrange multiplier, using the gradient of the objective, and
+  // putting x(y) back into the constraint. This results in a fourth order
+  // polynomial in y, which can be solved using e.g. the companion matrix.
+  // See the description of MakePolynomialForBoundaryConstrainedProblem for
+  // details. The result is up to four real roots y*, not all of which
+  // correspond to feasible points. The feasible points x(y*) have to be
+  // tested for optimality.
+
+  if (subspace_is_one_dimensional_) {
+    // The subspace is one-dimensional, so both the gradient and
+    // the Gauss-Newton step point in the same direction.
+    // In this case, we move along the gradient until we reach the trust
+    // region boundary.
+    dogleg_step = -(radius_ / gradient_.norm()) * gradient_;
+    dogleg_step_norm_ = radius_;
+    dogleg_step.array() /= diagonal_.array();
+    VLOG(3) << "Dogleg subspace step size (1D): " << dogleg_step_norm_
+            << " radius: " << radius_;
+    return;
+  }
+
+  Vector2d minimum(0.0, 0.0);
+  if (!FindMinimumOnTrustRegionBoundary(&minimum)) {
+    // This can happen in the positive semi-definite case; fall back to
+    // a traditional dogleg step.
+    LOG(WARNING) << "Failed to compute polynomial roots. "
+                 << "Taking traditional dogleg step instead.";
+    ComputeTraditionalDoglegStep(dogleg);
+    return;
+  }
+
+  // Test first order optimality at the minimum.
+  // The first order KKT conditions state that the minimum x*
+  // has to satisfy either || x* ||^2 < r^2 (i.e. has to lie within
+  // the trust region), or
+  //
+  //   (B x* + g) + y x* = 0
+  //
+  // for some positive scalar y.
+  // Here, as it is already known that the minimum lies on the boundary, the
+  // latter condition is tested. To allow for small imprecisions, we test if
+  // the angle between (B x* + g) and -x* is smaller than acos(0.99).
+  // The exact value of the cosine is arbitrary but should be close to 1.
+  //
+  // This condition should not be violated. If it is, the minimum was not
+  // correctly determined.
+  const double kCosineThreshold = 0.99;
+  const Vector2d grad_minimum = subspace_B_ * minimum + subspace_g_;
+  const double cosine_angle = -minimum.dot(grad_minimum) /
+      (minimum.norm() * grad_minimum.norm());
+  if (cosine_angle < kCosineThreshold) {
+    LOG(WARNING) << "First order optimality seems to be violated "
+                 << "in the subspace method!\n"
+                 << "Cosine of angle between x and B x + g is "
+                 << cosine_angle << ".\n"
+                 << "Taking a regular dogleg step instead.\n"
+                 << "Please consider filing a bug report if this "
+                 << "happens frequently or consistently.\n";
+    ComputeTraditionalDoglegStep(dogleg);
+    return;
+  }
+
+  // Create the full step from the optimal 2d solution.
+  dogleg_step = subspace_basis_ * minimum;
+  dogleg_step_norm_ = radius_;
+  dogleg_step.array() /= diagonal_.array();
+  VLOG(3) << "Dogleg subspace step size: " << dogleg_step_norm_
+          << " radius: " << radius_;
+}
+
+// Build the polynomial that defines the optimal Lagrange multipliers.
+// Let the Lagrangian be
+//
+//   L(x, y) = 0.5 x^T B x + x^T g + y (0.5 x^T x - 0.5 r^2).       (1)
+//
+// Stationary points of the Lagrangian are given by
+//
+//   0 = d L(x, y) / dx = Bx + g + y x                              (2)
+//   0 = d L(x, y) / dy = 0.5 x^T x - 0.5 r^2                       (3)
+//
+// For any given y, we can solve (2) for x as
+//
+//   x(y) = -(B + y I)^-1 g .                                       (4)
+//
+// As B + y I is 2x2, we form the inverse explicitly:
+//
+//   (B + y I)^-1 = (1 / det(B + y I)) adj(B + y I)                 (5)
+//
+// where adj() denotes adjugation. This should be safe, as B is positive
+// semi-definite and y is necessarily positive, so (B + y I) is indeed
+// invertible.
+// Plugging (5) into (4) and the result into (3), then dividing by 0.5 we
+// obtain
+//
+//   0 = (1 / det(B + y I))^2 g^T adj(B + y I)^T adj(B + y I) g - r^2
+//                                                                  (6)
+//
+// or
+//
+//   det(B + y I)^2 r^2 = g^T adj(B + y I)^T adj(B + y I) g         (7a)
+//                      = g^T adj(B)^T adj(B) g
+//                           + 2 y g^T adj(B)^T g + y^2 g^T g       (7b)
+//
+// as
+//
+//   adj(B + y I) = adj(B) + y I = adj(B)^T + y I .                 (8)
+//
+// The left hand side can be expressed explicitly using
+//
+//   det(B + y I) = det(B) + y tr(B) + y^2 .                        (9)
+//
+// So (7) is a polynomial in y of degree four.
+// Bringing everything back to the left hand side, the coefficients can
+// be read off as
+//
+//     y^4  r^2
+//   + y^3  2 r^2 tr(B)
+//   + y^2 (r^2 tr(B)^2 + 2 r^2 det(B) - g^T g)
+//   + y^1 (2 r^2 det(B) tr(B) - 2 g^T adj(B)^T g)
+//   + y^0 (r^2 det(B)^2 - g^T adj(B)^T adj(B) g)
+//
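+// As a quick sanity check, take B = I and g = (g1, 0)^T, so that
+// det(B) = 1, tr(B) = 2 and adj(B) = I. The coefficients above give
+//
+//   r^2 y^4 + 4 r^2 y^3 + (6 r^2 - g1^2) y^2 + (4 r^2 - 2 g1^2) y
+//       + (r^2 - g1^2)
+//     = r^2 (1 + y)^4 - g1^2 (1 + y)^2 ,
+//
+// whose real roots satisfy g1^2 / (1 + y)^2 = r^2, i.e. exactly the
+// boundary constraint || x(y) ||^2 = || -(B + y I)^-1 g ||^2 = r^2.
+//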
+Vector DoglegStrategy::MakePolynomialForBoundaryConstrainedProblem() const {
+  const double detB = subspace_B_.determinant();
+  const double trB = subspace_B_.trace();
+  const double r2 = radius_ * radius_;
+  Matrix2d B_adj;
+  B_adj <<  subspace_B_(1, 1) , -subspace_B_(0, 1),
+            -subspace_B_(1, 0) ,  subspace_B_(0, 0);
+
+  Vector polynomial(5);
+  polynomial(0) = r2;
+  polynomial(1) = 2.0 * r2 * trB;
+  polynomial(2) = r2 * (trB * trB + 2.0 * detB) - subspace_g_.squaredNorm();
+  polynomial(3) = -2.0 * (subspace_g_.transpose() * B_adj * subspace_g_
+      - r2 * detB * trB);
+  polynomial(4) = r2 * detB * detB - (B_adj * subspace_g_).squaredNorm();
+
+  return polynomial;
+}
+
+// Given a Lagrange multiplier y that corresponds to a stationary point
+// of the Lagrangian L(x, y), compute the corresponding x from the
+// equation
+//
+//   0 = d L(x, y) / dx
+//     = B * x + g + y * x
+//     = (B + y * I) * x + g
+//
+DoglegStrategy::Vector2d DoglegStrategy::ComputeSubspaceStepFromRoot(
+    double y) const {
+  const Matrix2d B_i = subspace_B_ + y * Matrix2d::Identity();
+  return -B_i.partialPivLu().solve(subspace_g_);
+}
+
+// This function evaluates the quadratic model at a point x in the
+// subspace spanned by subspace_basis_.
+double DoglegStrategy::EvaluateSubspaceModel(const Vector2d& x) const {
+  return 0.5 * x.dot(subspace_B_ * x) + subspace_g_.dot(x);
+}
+
+// This function attempts to solve the boundary-constrained subspace problem
+//
+//   min. 1/2 x^T B^T H B x + g^T B x
+//   s.t. || B x ||^2 = r^2
+//
+// where B is an orthonormal subspace basis and r is the trust-region radius.
+//
+// This is done by finding the roots of a fourth degree polynomial. If the
+// root finding fails, the function returns false and minimum will be set
+// to (0, 0). If it succeeds, true is returned.
+//
+// In the failure case, another step should be taken, such as the traditional
+// dogleg step.
+bool DoglegStrategy::FindMinimumOnTrustRegionBoundary(Vector2d* minimum) const {
+  CHECK(minimum != nullptr);
+
+  // Return (0, 0) in all error cases.
+  minimum->setZero();
+
+  // Create the fourth-degree polynomial that is a necessary condition for
+  // optimality.
+  const Vector polynomial = MakePolynomialForBoundaryConstrainedProblem();
+
+  // Find the real parts y_i of its roots (not only the real roots).
+  Vector roots_real;
+  if (!FindPolynomialRoots(polynomial, &roots_real, NULL)) {
+    // Failed to find the roots of the polynomial, i.e. the candidate
+    // solutions of the constrained problem. Report this back to the caller.
+    return false;
+  }
+
+  // For each root y, compute B x(y) and check for feasibility.
+  // Notice that there should always be four roots, as the leading term of
+  // the polynomial is r^2 and therefore non-zero. However, as some roots
+  // may be complex, the real parts are not necessarily unique.
+  double minimum_value = std::numeric_limits<double>::max();
+  bool valid_root_found = false;
+  for (int i = 0; i < roots_real.size(); ++i) {
+    const Vector2d x_i = ComputeSubspaceStepFromRoot(roots_real(i));
+
+    // Not all roots correspond to points on the trust region boundary.
+    // There are at most four candidate solutions. As we are interested
+    // in the minimum, it is safe to consider all of them after projecting
+    // them onto the trust region boundary.
+    if (x_i.norm() > 0) {
+      const double f_i = EvaluateSubspaceModel((radius_ / x_i.norm()) * x_i);
+      valid_root_found = true;
+      if (f_i < minimum_value) {
+        minimum_value = f_i;
+        *minimum = x_i;
+      }
+    }
+  }
+
+  return valid_root_found;
+}
+
+LinearSolver::Summary DoglegStrategy::ComputeGaussNewtonStep(
+    const PerSolveOptions& per_solve_options,
+    SparseMatrix* jacobian,
+    const double* residuals) {
+  const int n = jacobian->num_cols();
+  LinearSolver::Summary linear_solver_summary;
+  linear_solver_summary.termination_type = LINEAR_SOLVER_FAILURE;
+
+  // The Jacobian matrix is often quite poorly conditioned. Thus it is
+  // necessary to add a diagonal matrix at the bottom to prevent the
+  // linear solver from failing.
+  //
+  // We do this by computing the same diagonal matrix as the one used
+  // by Levenberg-Marquardt (other choices are possible), and scaling
+  // it by a small constant (independent of the trust region radius).
+  //
+  // If the solve fails, the multiplier to the diagonal is increased
+  // up to max_mu_ by a factor of mu_increase_factor_ every time. If
+  // the linear solver is still not successful, the strategy returns
+  // with LINEAR_SOLVER_FAILURE.
+  //
+  // The next time a new Gauss-Newton step is requested, the
+  // multiplier starts out from the value used in the last successful
+  // solve.
+  //
+  // When a step is declared successful, the multiplier is decreased
+  // by half of mu_increase_factor_.
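+  //
+  // For example, with min_mu_ = 1e-8, max_mu_ = 1.0 and
+  // mu_increase_factor_ = 10.0, a run of failed solves starting at
+  // mu_ = min_mu_ tries mu_ = 1e-8, 1e-7, ..., 1e-1 before giving up
+  // with LINEAR_SOLVER_FAILURE, and a later successful step divides
+  // mu_ by mu_increase_factor_ / 2 = 5 (never going below min_mu_).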
+
+  while (mu_ < max_mu_) {
+    // Dogleg, as far as I (sameeragarwal) understand it, requires a
+    // reasonably good estimate of the Gauss-Newton step. This means
+    // that we need to solve the normal equations more or less
+    // exactly. This is reflected in the values of the tolerances set
+    // below.
+    //
+    // For now, this strategy should only be used with exact
+    // factorization based solvers, for which these tolerances are
+    // automatically satisfied.
+    //
+    // The right way to combine inexact solves with trust region
+    // methods is to use Steihaug's method.
+    LinearSolver::PerSolveOptions solve_options;
+    solve_options.q_tolerance = 0.0;
+    solve_options.r_tolerance = 0.0;
+
+    lm_diagonal_ = diagonal_ * std::sqrt(mu_);
+    solve_options.D = lm_diagonal_.data();
+
+    // As in the LevenbergMarquardtStrategy, solve Jy = r instead
+    // of Jx = -r and later set x = -y to avoid having to modify
+    // either jacobian or residuals.
+    InvalidateArray(n, gauss_newton_step_.data());
+    linear_solver_summary = linear_solver_->Solve(jacobian,
+                                                  residuals,
+                                                  solve_options,
+                                                  gauss_newton_step_.data());
+
+    if (per_solve_options.dump_format_type == CONSOLE ||
+        (per_solve_options.dump_format_type != CONSOLE &&
+         !per_solve_options.dump_filename_base.empty())) {
+      if (!DumpLinearLeastSquaresProblem(per_solve_options.dump_filename_base,
+                                         per_solve_options.dump_format_type,
+                                         jacobian,
+                                         solve_options.D,
+                                         residuals,
+                                         gauss_newton_step_.data(),
+                                         0)) {
+        LOG(ERROR) << "Unable to dump trust region problem."
+                   << " Filename base: "
+                   << per_solve_options.dump_filename_base;
+      }
+    }
+
+    if (linear_solver_summary.termination_type == LINEAR_SOLVER_FATAL_ERROR) {
+      return linear_solver_summary;
+    }
+
+    if (linear_solver_summary.termination_type == LINEAR_SOLVER_FAILURE ||
+        !IsArrayValid(n, gauss_newton_step_.data())) {
+      mu_ *= mu_increase_factor_;
+      VLOG(2) << "Increasing mu " << mu_;
+      linear_solver_summary.termination_type = LINEAR_SOLVER_FAILURE;
+      continue;
+    }
+    break;
+  }
+
+  if (linear_solver_summary.termination_type != LINEAR_SOLVER_FAILURE) {
+    // The scaled Gauss-Newton step is D * GN:
+    //
+    //     - (D^-1 J^T J D^-1)^-1 (D^-1 g)
+    //   = - D (J^T J)^-1 D D^-1 g
+    //   = D * ( -(J^T J)^-1 g )  = D * GN
+    //
+    gauss_newton_step_.array() *= -diagonal_.array();
+  }
+
+  return linear_solver_summary;
+}
+
+void DoglegStrategy::StepAccepted(double step_quality) {
+  CHECK_GT(step_quality, 0.0);
+
+  if (step_quality < decrease_threshold_) {
+    radius_ *= 0.5;
+  }
+
+  if (step_quality > increase_threshold_) {
+    radius_ = std::max(radius_, 3.0 * dogleg_step_norm_);
+  }
+
+  // Reduce the regularization multiplier, in the hope that whatever
+  // was causing the rank deficiency has gone away and we can return
+  // to doing a pure Gauss-Newton solve.
+  mu_ = std::max(min_mu_, 2.0 * mu_ / mu_increase_factor_);
+  reuse_ = false;
+}
+
+void DoglegStrategy::StepRejected(double step_quality) {
+  radius_ *= 0.5;
+  reuse_ = true;
+}
+
+void DoglegStrategy::StepIsInvalid() {
+  mu_ *= mu_increase_factor_;
+  reuse_ = false;
+}
+
+double DoglegStrategy::Radius() const {
+  return radius_;
+}
+
+bool DoglegStrategy::ComputeSubspaceModel(SparseMatrix* jacobian) {
+  // Compute an orthogonal basis for the subspace using QR decomposition.
+  Matrix basis_vectors(jacobian->num_cols(), 2);
+  basis_vectors.col(0) = gradient_;
+  basis_vectors.col(1) = gauss_newton_step_;
+  Eigen::ColPivHouseholderQR<Matrix> basis_qr(basis_vectors);
+
+  switch (basis_qr.rank()) {
+    case 0:
+      // This should never happen, as it implies that both the gradient
+      // and the Gauss-Newton step are zero. In this case, the minimizer should
+      // have stopped due to the gradient being too small.
+      LOG(ERROR) << "Rank of subspace basis is 0. "
+                 << "This means that the gradient at the current iterate is "
+                 << "zero but the optimization has not been terminated. "
+                 << "You may have found a bug in Ceres.";
+      return false;
+
+    case 1:
+      // Gradient and Gauss-Newton step coincide, so we lie on one of the
+      // major axes of the quadratic problem. In this case, we simply move
+      // along the gradient until we reach the trust region boundary.
+      subspace_is_one_dimensional_ = true;
+      return true;
+
+    case 2:
+      subspace_is_one_dimensional_ = false;
+      break;
+
+    default:
+      LOG(ERROR) << "Rank of the subspace basis matrix is reported to be "
+                 << "greater than 2. As the matrix contains only two "
+                 << "columns this cannot be true and is indicative of "
+                 << "a bug.";
+      return false;
+  }
+
+  // The subspace is two-dimensional, so compute the subspace model.
+  // Given the basis U, this is
+  //
+  //   subspace_g_ = g_scaled^T U
+  //
+  // and
+  //
+  //   subspace_B_ = U^T (J_scaled^T J_scaled) U
+  //
+  // As J_scaled = J * D^-1, the latter becomes
+  //
+  //   subspace_B_ = ((U^T D^-1) J^T) (J (D^-1 U))
+  //               = (J (D^-1 U))^T (J (D^-1 U))
+
+  subspace_basis_ =
+      basis_qr.householderQ() * Matrix::Identity(jacobian->num_cols(), 2);
+
+  subspace_g_ = subspace_basis_.transpose() * gradient_;
+
+  Eigen::Matrix<double, 2, Eigen::Dynamic, Eigen::RowMajor>
+      Jb(2, jacobian->num_rows());
+  Jb.setZero();
+
+  Vector tmp;
+  tmp = (subspace_basis_.col(0).array() / diagonal_.array()).matrix();
+  jacobian->RightMultiply(tmp.data(), Jb.row(0).data());
+  tmp = (subspace_basis_.col(1).array() / diagonal_.array()).matrix();
+  jacobian->RightMultiply(tmp.data(), Jb.row(1).data());
+
+  subspace_B_ = Jb * Jb.transpose();
+
+  return true;
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/dogleg_strategy.h b/internal/ceres/dogleg_strategy.h
new file mode 100644
index 0000000..11a3bb0
--- /dev/null
+++ b/internal/ceres/dogleg_strategy.h
@@ -0,0 +1,165 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_DOGLEG_STRATEGY_H_
+#define CERES_INTERNAL_DOGLEG_STRATEGY_H_
+
+#include "ceres/linear_solver.h"
+#include "ceres/trust_region_strategy.h"
+
+namespace ceres {
+namespace internal {
+
+// Dogleg step computation and trust region sizing strategy based on
+// on "Methods for Nonlinear Least Squares" by K. Madsen, H.B. Nielsen
+// and O. Tingleff. Available to download from
+//
+// http://www2.imm.dtu.dk/pubdb/views/edoc_download.php/3215/pdf/imm3215.pdf
+//
+// One minor modification is that instead of computing the pure
+// Gauss-Newton step, we compute a regularized version of it. This is
+// because the Jacobian is often rank-deficient and in such cases
+// using a direct solver leads to numerical failure.
+//
+// If options.dogleg_type is SUBSPACE_DOGLEG, the
+// DoglegStrategy follows the approach by Shultz, Schnabel, Byrd.
+// This finds the exact optimum over the two-dimensional subspace
+// spanned by the two Dogleg vectors.
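+//
+// Typical use (see dogleg_strategy_test.cc) is, roughly,
+//
+//   DoglegStrategy strategy(options);
+//   TrustRegionStrategy::PerSolveOptions pso;
+//   TrustRegionStrategy::Summary summary =
+//       strategy.ComputeStep(pso, jacobian, residuals, step);
+//
+// followed by a call to StepAccepted(), StepRejected() or StepIsInvalid()
+// to tell the strategy how the step worked out before the next call to
+// ComputeStep.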
+class DoglegStrategy : public TrustRegionStrategy {
+ public:
+  explicit DoglegStrategy(const TrustRegionStrategy::Options& options);
+  virtual ~DoglegStrategy() {}
+
+  // TrustRegionStrategy interface
+  virtual Summary ComputeStep(const PerSolveOptions& per_solve_options,
+                              SparseMatrix* jacobian,
+                              const double* residuals,
+                              double* step);
+  virtual void StepAccepted(double step_quality);
+  virtual void StepRejected(double step_quality);
+  virtual void StepIsInvalid();
+
+  virtual double Radius() const;
+
+  // These functions are predominantly for testing.
+  Vector gradient() const { return gradient_; }
+  Vector gauss_newton_step() const { return gauss_newton_step_; }
+  Matrix subspace_basis() const { return subspace_basis_; }
+  Vector subspace_g() const { return subspace_g_; }
+  Matrix subspace_B() const { return subspace_B_; }
+
+ private:
+  typedef Eigen::Matrix<double, 2, 1, Eigen::DontAlign> Vector2d;
+  typedef Eigen::Matrix<double, 2, 2, Eigen::DontAlign> Matrix2d;
+
+  LinearSolver::Summary ComputeGaussNewtonStep(
+      const PerSolveOptions& per_solve_options,
+      SparseMatrix* jacobian,
+      const double* residuals);
+  void ComputeCauchyPoint(SparseMatrix* jacobian);
+  void ComputeGradient(SparseMatrix* jacobian, const double* residuals);
+  void ComputeTraditionalDoglegStep(double* step);
+  bool ComputeSubspaceModel(SparseMatrix* jacobian);
+  void ComputeSubspaceDoglegStep(double* step);
+
+  bool FindMinimumOnTrustRegionBoundary(Vector2d* minimum) const;
+  Vector MakePolynomialForBoundaryConstrainedProblem() const;
+  Vector2d ComputeSubspaceStepFromRoot(double lambda) const;
+  double EvaluateSubspaceModel(const Vector2d& x) const;
+
+  LinearSolver* linear_solver_;
+  double radius_;
+  const double max_radius_;
+
+  const double min_diagonal_;
+  const double max_diagonal_;
+
+  // mu is used to scale the diagonal matrix used to make the
+  // Gauss-Newton solve full rank. Each solve starts from the current
+  // value of mu_ (initially min_mu_) and tries values up to
+  // max_mu_. If the user
+  // reports an invalid step, the value of mu_ is increased so that
+  // the next solve starts with a stronger regularization.
+  //
+  // If a successful step is reported, then the value of mu_ is
+  // decreased with a lower bound of min_mu_.
+  double mu_;
+  const double min_mu_;
+  const double max_mu_;
+  const double mu_increase_factor_;
+  const double increase_threshold_;
+  const double decrease_threshold_;
+
+  Vector diagonal_;  // sqrt(diag(J^T J))
+  Vector lm_diagonal_;
+
+  Vector gradient_;
+  Vector gauss_newton_step_;
+
+  // cauchy_step = alpha * gradient
+  double alpha_;
+  double dogleg_step_norm_;
+
+  // When ComputeStep is called, reuse_ indicates whether the
+  // Gauss-Newton and Cauchy steps from the last call to ComputeStep
+  // can be reused or not.
+  //
+  // If the user called StepAccepted, then it is expected that the
+  // user has recomputed the Jacobian matrix and a new Gauss-Newton
+  // solve is needed, so reuse_ is set to false.
+  //
+  // If the user called StepRejected, then it is expected that the
+  // user wants to solve the trust region problem with the same matrix
+  // but a different trust region radius and the Gauss-Newton and
+  // Cauchy steps can be reused to compute the Dogleg step, so reuse_
+  // is set to true.
+  //
+  // If the user called StepIsInvalid, then there was a numerical
+  // problem with the step computed in the last call to ComputeStep,
+  // and the regularization used to do the Gauss-Newton solve is
+  // increased and a new solve should be done when ComputeStep is
+  // called again, so reuse_ is set to false.
+  bool reuse_;
+
+  // The dogleg type determines how the minimum of the local
+  // quadratic model is found.
+  DoglegType dogleg_type_;
+
+  // If the type is SUBSPACE_DOGLEG, the two-dimensional
+  // model 1/2 x^T B x + g^T x has to be computed and stored.
+  bool subspace_is_one_dimensional_;
+  Matrix subspace_basis_;
+  Vector2d subspace_g_;
+  Matrix2d subspace_B_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_DOGLEG_STRATEGY_H_
diff --git a/internal/ceres/dogleg_strategy_test.cc b/internal/ceres/dogleg_strategy_test.cc
new file mode 100644
index 0000000..c435be6
--- /dev/null
+++ b/internal/ceres/dogleg_strategy_test.cc
@@ -0,0 +1,287 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: moll.markus@arcor.de (Markus Moll)
+
+#include <limits>
+#include <memory>
+#include "ceres/internal/eigen.h"
+#include "ceres/dense_qr_solver.h"
+#include "ceres/dogleg_strategy.h"
+#include "ceres/linear_solver.h"
+#include "ceres/trust_region_strategy.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+namespace {
+
+class Fixture : public testing::Test {
+ protected:
+  std::unique_ptr<DenseSparseMatrix> jacobian_;
+  Vector residual_;
+  Vector x_;
+  TrustRegionStrategy::Options options_;
+};
+
+// A test problem where
+//
+//   J^T J = Q diag([1 2 4 8 16 32]) Q^T
+//
+// where Q is a randomly chosen orthonormal basis of R^6.
+// The residual is chosen so that the minimum of the quadratic function is
+// at (1, 1, 1, 1, 1, 1). It is therefore at a distance of sqrt(6) ~ 2.45
+// from the origin.
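+//
+// Note that residual_ = -jacobian * minimum makes
+//
+//   0.5 || jacobian * x + residual_ ||^2
+//     = 0.5 || jacobian * (x - minimum) ||^2 ,
+//
+// so the quadratic model is minimized exactly at x = minimum. The valley
+// fixture below uses the same construction.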
+class DoglegStrategyFixtureEllipse : public Fixture {
+ protected:
+  virtual void SetUp() {
+    Matrix basis(6, 6);
+    // The following lines exceed 80 characters for better readability.
+    basis << -0.1046920933796121, -0.7449367449921986, -0.4190744502875876, -0.4480450716142566,  0.2375351607929440, -0.0363053418882862,  // NOLINT
+              0.4064975684355914,  0.2681113508511354, -0.7463625494601520, -0.0803264850508117, -0.4463149623021321,  0.0130224954867195,  // NOLINT
+             -0.5514387729089798,  0.1026621026168657, -0.5008316122125011,  0.5738122212666414,  0.2974664724007106,  0.1296020877535158,  // NOLINT
+              0.5037835370947156,  0.2668479925183712, -0.1051754618492798, -0.0272739396578799,  0.7947481647088278, -0.1776623363955670,  // NOLINT
+             -0.4005458426625444,  0.2939330589634109, -0.0682629380550051, -0.2895448882503687, -0.0457239396341685, -0.8139899477847840,  // NOLINT
+             -0.3247764582762654,  0.4528151365941945, -0.0276683863102816, -0.6155994592510784,  0.1489240599972848,  0.5362574892189350;  // NOLINT
+
+    Vector Ddiag(6);
+    Ddiag << 1.0, 2.0, 4.0, 8.0, 16.0, 32.0;
+
+    Matrix sqrtD = Ddiag.array().sqrt().matrix().asDiagonal();
+    Matrix jacobian = sqrtD * basis;
+    jacobian_.reset(new DenseSparseMatrix(jacobian));
+
+    Vector minimum(6);
+    minimum << 1.0, 1.0, 1.0, 1.0, 1.0, 1.0;
+    residual_ = -jacobian * minimum;
+
+    x_.resize(6);
+    x_.setZero();
+
+    options_.min_lm_diagonal = 1.0;
+    options_.max_lm_diagonal = 1.0;
+  }
+};
+
+// A test problem where
+//
+//   J^T J = diag([1 2 4 8 16 32]) .
+//
+// The residual is chosen so that the minimum of the quadratic function is
+// at (0, 0, 1, 0, 0, 0). It is therefore at a distance of 1 from the origin.
+// The gradient at the origin points towards the global minimum.
+class DoglegStrategyFixtureValley : public Fixture {
+ protected:
+  virtual void SetUp() {
+    Vector Ddiag(6);
+    Ddiag << 1.0, 2.0, 4.0, 8.0, 16.0, 32.0;
+
+    Matrix jacobian = Ddiag.asDiagonal();
+    jacobian_.reset(new DenseSparseMatrix(jacobian));
+
+    Vector minimum(6);
+    minimum << 0.0, 0.0, 1.0, 0.0, 0.0, 0.0;
+    residual_ = -jacobian * minimum;
+
+    x_.resize(6);
+    x_.setZero();
+
+    options_.min_lm_diagonal = 1.0;
+    options_.max_lm_diagonal = 1.0;
+  }
+};
+
+const double kTolerance = 1e-14;
+const double kToleranceLoose = 1e-5;
+const double kEpsilon = std::numeric_limits<double>::epsilon();
+
+}  // namespace
+
+// The DoglegStrategy must never return a step that is longer than the current
+// trust region radius.
+TEST_F(DoglegStrategyFixtureEllipse, TrustRegionObeyedTraditional) {
+  std::unique_ptr<LinearSolver> linear_solver(
+      new DenseQRSolver(LinearSolver::Options()));
+  options_.linear_solver = linear_solver.get();
+  // The global minimum is at (1, 1, ..., 1), so the distance to it is
+  // sqrt(6.0).  By restricting the trust region to a radius of 2.0,
+  // we test if the trust region is actually obeyed.
+  options_.dogleg_type = TRADITIONAL_DOGLEG;
+  options_.initial_radius = 2.0;
+  options_.max_radius = 2.0;
+
+  DoglegStrategy strategy(options_);
+  TrustRegionStrategy::PerSolveOptions pso;
+
+  TrustRegionStrategy::Summary summary = strategy.ComputeStep(pso,
+                                                              jacobian_.get(),
+                                                              residual_.data(),
+                                                              x_.data());
+
+  EXPECT_NE(summary.termination_type, LINEAR_SOLVER_FAILURE);
+  EXPECT_LE(x_.norm(), options_.initial_radius * (1.0 + 4.0 * kEpsilon));
+}
+
+TEST_F(DoglegStrategyFixtureEllipse, TrustRegionObeyedSubspace) {
+  std::unique_ptr<LinearSolver> linear_solver(
+      new DenseQRSolver(LinearSolver::Options()));
+  options_.linear_solver = linear_solver.get();
+  options_.dogleg_type = SUBSPACE_DOGLEG;
+  options_.initial_radius = 2.0;
+  options_.max_radius = 2.0;
+
+  DoglegStrategy strategy(options_);
+  TrustRegionStrategy::PerSolveOptions pso;
+
+  TrustRegionStrategy::Summary summary = strategy.ComputeStep(pso,
+                                                              jacobian_.get(),
+                                                              residual_.data(),
+                                                              x_.data());
+
+  EXPECT_NE(summary.termination_type, LINEAR_SOLVER_FAILURE);
+  EXPECT_LE(x_.norm(), options_.initial_radius * (1.0 + 4.0 * kEpsilon));
+}
+
+TEST_F(DoglegStrategyFixtureEllipse, CorrectGaussNewtonStep) {
+  std::unique_ptr<LinearSolver> linear_solver(
+      new DenseQRSolver(LinearSolver::Options()));
+  options_.linear_solver = linear_solver.get();
+  options_.dogleg_type = SUBSPACE_DOGLEG;
+  options_.initial_radius = 10.0;
+  options_.max_radius = 10.0;
+
+  DoglegStrategy strategy(options_);
+  TrustRegionStrategy::PerSolveOptions pso;
+
+  TrustRegionStrategy::Summary summary = strategy.ComputeStep(pso,
+                                                              jacobian_.get(),
+                                                              residual_.data(),
+                                                              x_.data());
+
+  EXPECT_NE(summary.termination_type, LINEAR_SOLVER_FAILURE);
+  EXPECT_NEAR(x_(0), 1.0, kToleranceLoose);
+  EXPECT_NEAR(x_(1), 1.0, kToleranceLoose);
+  EXPECT_NEAR(x_(2), 1.0, kToleranceLoose);
+  EXPECT_NEAR(x_(3), 1.0, kToleranceLoose);
+  EXPECT_NEAR(x_(4), 1.0, kToleranceLoose);
+  EXPECT_NEAR(x_(5), 1.0, kToleranceLoose);
+}
+
+// Test if the subspace basis is a valid orthonormal basis of the space spanned
+// by the gradient and the Gauss-Newton point.
+TEST_F(DoglegStrategyFixtureEllipse, ValidSubspaceBasis) {
+  std::unique_ptr<LinearSolver> linear_solver(
+      new DenseQRSolver(LinearSolver::Options()));
+  options_.linear_solver = linear_solver.get();
+  options_.dogleg_type = SUBSPACE_DOGLEG;
+  options_.initial_radius = 2.0;
+  options_.max_radius = 2.0;
+
+  DoglegStrategy strategy(options_);
+  TrustRegionStrategy::PerSolveOptions pso;
+
+  strategy.ComputeStep(pso, jacobian_.get(), residual_.data(), x_.data());
+
+  // Check if the basis is orthonormal.
+  const Matrix basis = strategy.subspace_basis();
+  EXPECT_NEAR(basis.col(0).norm(), 1.0, kTolerance);
+  EXPECT_NEAR(basis.col(1).norm(), 1.0, kTolerance);
+  EXPECT_NEAR(basis.col(0).dot(basis.col(1)), 0.0, kTolerance);
+
+  // Check if the gradient projects onto itself.
+  const Vector gradient = strategy.gradient();
+  EXPECT_NEAR((gradient - basis*(basis.transpose()*gradient)).norm(),
+              0.0,
+              kTolerance);
+
+  // Check if the Gauss-Newton point projects onto itself.
+  const Vector gn = strategy.gauss_newton_step();
+  EXPECT_NEAR((gn - basis*(basis.transpose()*gn)).norm(),
+              0.0,
+              kTolerance);
+}
+
+// Test if the step is correct if the gradient and the Gauss-Newton step point
+// in the same direction and the Gauss-Newton step is outside the trust region,
+// i.e. the trust region is active.
+TEST_F(DoglegStrategyFixtureValley, CorrectStepLocalOptimumAlongGradient) {
+  std::unique_ptr<LinearSolver> linear_solver(
+      new DenseQRSolver(LinearSolver::Options()));
+  options_.linear_solver = linear_solver.get();
+  options_.dogleg_type = SUBSPACE_DOGLEG;
+  options_.initial_radius = 0.25;
+  options_.max_radius = 0.25;
+
+  DoglegStrategy strategy(options_);
+  TrustRegionStrategy::PerSolveOptions pso;
+
+  TrustRegionStrategy::Summary summary = strategy.ComputeStep(pso,
+                                                              jacobian_.get(),
+                                                              residual_.data(),
+                                                              x_.data());
+
+  EXPECT_NE(summary.termination_type, LINEAR_SOLVER_FAILURE);
+  EXPECT_NEAR(x_(0), 0.0, kToleranceLoose);
+  EXPECT_NEAR(x_(1), 0.0, kToleranceLoose);
+  EXPECT_NEAR(x_(2), options_.initial_radius, kToleranceLoose);
+  EXPECT_NEAR(x_(3), 0.0, kToleranceLoose);
+  EXPECT_NEAR(x_(4), 0.0, kToleranceLoose);
+  EXPECT_NEAR(x_(5), 0.0, kToleranceLoose);
+}
+
+// Test if the step is correct if the gradient and the Gauss-Newton step point
+// in the same direction and the Gauss-Newton step is inside the trust region,
+// i.e. the trust region is inactive.
+TEST_F(DoglegStrategyFixtureValley, CorrectStepGlobalOptimumAlongGradient) {
+  std::unique_ptr<LinearSolver> linear_solver(
+      new DenseQRSolver(LinearSolver::Options()));
+  options_.linear_solver = linear_solver.get();
+  options_.dogleg_type = SUBSPACE_DOGLEG;
+  options_.initial_radius = 2.0;
+  options_.max_radius = 2.0;
+
+  DoglegStrategy strategy(options_);
+  TrustRegionStrategy::PerSolveOptions pso;
+
+  TrustRegionStrategy::Summary summary = strategy.ComputeStep(pso,
+                                                              jacobian_.get(),
+                                                              residual_.data(),
+                                                              x_.data());
+
+  EXPECT_NE(summary.termination_type, LINEAR_SOLVER_FAILURE);
+  EXPECT_NEAR(x_(0), 0.0, kToleranceLoose);
+  EXPECT_NEAR(x_(1), 0.0, kToleranceLoose);
+  EXPECT_NEAR(x_(2), 1.0, kToleranceLoose);
+  EXPECT_NEAR(x_(3), 0.0, kToleranceLoose);
+  EXPECT_NEAR(x_(4), 0.0, kToleranceLoose);
+  EXPECT_NEAR(x_(5), 0.0, kToleranceLoose);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/dynamic_autodiff_cost_function_test.cc b/internal/ceres/dynamic_autodiff_cost_function_test.cc
new file mode 100644
index 0000000..29f8d10
--- /dev/null
+++ b/internal/ceres/dynamic_autodiff_cost_function_test.cc
@@ -0,0 +1,775 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: thadh@gmail.com (Thad Hughes)
+//         mierle@gmail.com (Keir Mierle)
+//         sameeragarwal@google.com (Sameer Agarwal)
+
+#include <cstddef>
+
+#include <memory>
+#include "ceres/dynamic_autodiff_cost_function.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+using std::vector;
+
+// Takes 2 parameter blocks:
+//     parameters[0] is size 10.
+//     parameters[1] is size 5.
+// Emits 21 residuals:
+//     A: i - parameters[0][i], for i in [0,10)  -- this is 10 residuals
+//     B: parameters[0][i] - i, for i in [0,10)  -- this is another 10.
+//     C: sum(parameters[0][i]^2 - 8*parameters[0][i]) + sum(parameters[1][i])
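+//
+// The A and B residuals are interleaved in the output as
+// A_0, B_0, A_1, B_1, ..., with C last. For example, with both parameter
+// blocks zero, A_i = i, B_i = -i and C = 0; with parameters[0][i] = 2 * i
+// and parameters[1] zero, C = sum_i (4 i^2 - 16 i) = 4 * 285 - 16 * 45
+// = 420. The tests below check these values.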
+class MyCostFunctor {
+ public:
+  template <typename T>
+  bool operator()(T const* const* parameters, T* residuals) const {
+    const T* params0 = parameters[0];
+    int r = 0;
+    for (int i = 0; i < 10; ++i) {
+      residuals[r++] = T(i) - params0[i];
+      residuals[r++] = params0[i] - T(i);
+    }
+
+    T c_residual(0.0);
+    for (int i = 0; i < 10; ++i) {
+      c_residual += pow(params0[i], 2) - T(8) * params0[i];
+    }
+
+    const T* params1 = parameters[1];
+    for (int i = 0; i < 5; ++i) {
+      c_residual += params1[i];
+    }
+    residuals[r++] = c_residual;
+    return true;
+  }
+};
+
+TEST(DynamicAutodiffCostFunctionTest, TestResiduals) {
+  vector<double> param_block_0(10, 0.0);
+  vector<double> param_block_1(5, 0.0);
+  DynamicAutoDiffCostFunction<MyCostFunctor, 3> cost_function(
+      new MyCostFunctor());
+  cost_function.AddParameterBlock(param_block_0.size());
+  cost_function.AddParameterBlock(param_block_1.size());
+  cost_function.SetNumResiduals(21);
+
+  // Test residual computation.
+  vector<double> residuals(21, -100000);
+  vector<double*> parameter_blocks(2);
+  parameter_blocks[0] = &param_block_0[0];
+  parameter_blocks[1] = &param_block_1[0];
+  EXPECT_TRUE(cost_function.Evaluate(&parameter_blocks[0],
+                                     residuals.data(),
+                                     NULL));
+  for (int r = 0; r < 10; ++r) {
+    EXPECT_EQ(1.0 * r, residuals.at(r * 2));
+    EXPECT_EQ(-1.0 * r, residuals.at(r * 2 + 1));
+  }
+  EXPECT_EQ(0, residuals.at(20));
+}
+
+TEST(DynamicAutodiffCostFunctionTest, TestJacobian) {
+  // Test the residual counting.
+  vector<double> param_block_0(10, 0.0);
+  for (int i = 0; i < 10; ++i) {
+    param_block_0[i] = 2 * i;
+  }
+  vector<double> param_block_1(5, 0.0);
+  DynamicAutoDiffCostFunction<MyCostFunctor, 3> cost_function(
+      new MyCostFunctor());
+  cost_function.AddParameterBlock(param_block_0.size());
+  cost_function.AddParameterBlock(param_block_1.size());
+  cost_function.SetNumResiduals(21);
+
+  // Prepare the residuals.
+  vector<double> residuals(21, -100000);
+
+  // Prepare the parameters.
+  vector<double*> parameter_blocks(2);
+  parameter_blocks[0] = &param_block_0[0];
+  parameter_blocks[1] = &param_block_1[0];
+
+  // Prepare the jacobian.
+  vector<vector<double>> jacobian_vect(2);
+  jacobian_vect[0].resize(21 * 10, -100000);
+  jacobian_vect[1].resize(21 * 5, -100000);
+  vector<double*> jacobian;
+  jacobian.push_back(jacobian_vect[0].data());
+  jacobian.push_back(jacobian_vect[1].data());
+
+  // Test jacobian computation.
+  EXPECT_TRUE(cost_function.Evaluate(parameter_blocks.data(),
+                                     residuals.data(),
+                                     jacobian.data()));
+
+  for (int r = 0; r < 10; ++r) {
+    EXPECT_EQ(-1.0 * r, residuals.at(r * 2));
+    EXPECT_EQ(+1.0 * r, residuals.at(r * 2 + 1));
+  }
+  EXPECT_EQ(420, residuals.at(20));
+  for (int p = 0; p < 10; ++p) {
+    // Check "A" Jacobian.
+    EXPECT_EQ(-1.0, jacobian_vect[0][2*p * 10 + p]);
+    // Check "B" Jacobian.
+    EXPECT_EQ(+1.0, jacobian_vect[0][(2*p+1) * 10 + p]);
+    jacobian_vect[0][2*p * 10 + p] = 0.0;
+    jacobian_vect[0][(2*p+1) * 10 + p] = 0.0;
+  }
+
+  // Check "C" Jacobian for first parameter block.
+  for (int p = 0; p < 10; ++p) {
+    EXPECT_EQ(4 * p - 8, jacobian_vect[0][20 * 10 + p]);
+    jacobian_vect[0][20 * 10 + p] = 0.0;
+  }
+  for (int i = 0; i < jacobian_vect[0].size(); ++i) {
+    EXPECT_EQ(0.0, jacobian_vect[0][i]);
+  }
+
+  // Check "C" Jacobian for second parameter block.
+  for (int p = 0; p < 5; ++p) {
+    EXPECT_EQ(1.0, jacobian_vect[1][20 * 5 + p]);
+    jacobian_vect[1][20 * 5 + p] = 0.0;
+  }
+  for (int i = 0; i < jacobian_vect[1].size(); ++i) {
+    EXPECT_EQ(0.0, jacobian_vect[1][i]);
+  }
+}
+
+TEST(DynamicAutodiffCostFunctionTest, JacobianWithFirstParameterBlockConstant) {
+  // Test the residual counting.
+  vector<double> param_block_0(10, 0.0);
+  for (int i = 0; i < 10; ++i) {
+    param_block_0[i] = 2 * i;
+  }
+  vector<double> param_block_1(5, 0.0);
+  DynamicAutoDiffCostFunction<MyCostFunctor, 3> cost_function(
+      new MyCostFunctor());
+  cost_function.AddParameterBlock(param_block_0.size());
+  cost_function.AddParameterBlock(param_block_1.size());
+  cost_function.SetNumResiduals(21);
+
+  // Prepare the residuals.
+  vector<double> residuals(21, -100000);
+
+  // Prepare the parameters.
+  vector<double*> parameter_blocks(2);
+  parameter_blocks[0] = &param_block_0[0];
+  parameter_blocks[1] = &param_block_1[0];
+
+  // Prepare the jacobian.
+  vector<vector<double>> jacobian_vect(2);
+  jacobian_vect[0].resize(21 * 10, -100000);
+  jacobian_vect[1].resize(21 * 5, -100000);
+  vector<double*> jacobian;
+  jacobian.push_back(NULL);
+  jacobian.push_back(jacobian_vect[1].data());
+
+  // Test jacobian computation.
+  EXPECT_TRUE(cost_function.Evaluate(parameter_blocks.data(),
+                                     residuals.data(),
+                                     jacobian.data()));
+
+  for (int r = 0; r < 10; ++r) {
+    EXPECT_EQ(-1.0 * r, residuals.at(r * 2));
+    EXPECT_EQ(+1.0 * r, residuals.at(r * 2 + 1));
+  }
+  EXPECT_EQ(420, residuals.at(20));
+
+  // Check "C" Jacobian for second parameter block.
+  for (int p = 0; p < 5; ++p) {
+    EXPECT_EQ(1.0, jacobian_vect[1][20 * 5 + p]);
+    jacobian_vect[1][20 * 5 + p] = 0.0;
+  }
+  for (int i = 0; i < jacobian_vect[1].size(); ++i) {
+    EXPECT_EQ(0.0, jacobian_vect[1][i]);
+  }
+}
+
+TEST(DynamicAutodiffCostFunctionTest, JacobianWithSecondParameterBlockConstant) {  // NOLINT
+  // Test the residual counting.
+  vector<double> param_block_0(10, 0.0);
+  for (int i = 0; i < 10; ++i) {
+    param_block_0[i] = 2 * i;
+  }
+  vector<double> param_block_1(5, 0.0);
+  DynamicAutoDiffCostFunction<MyCostFunctor, 3> cost_function(
+      new MyCostFunctor());
+  cost_function.AddParameterBlock(param_block_0.size());
+  cost_function.AddParameterBlock(param_block_1.size());
+  cost_function.SetNumResiduals(21);
+
+  // Prepare the residuals.
+  vector<double> residuals(21, -100000);
+
+  // Prepare the parameters.
+  vector<double*> parameter_blocks(2);
+  parameter_blocks[0] = &param_block_0[0];
+  parameter_blocks[1] = &param_block_1[0];
+
+  // Prepare the jacobian.
+  vector<vector<double>> jacobian_vect(2);
+  jacobian_vect[0].resize(21 * 10, -100000);
+  jacobian_vect[1].resize(21 * 5, -100000);
+  vector<double*> jacobian;
+  jacobian.push_back(jacobian_vect[0].data());
+  jacobian.push_back(NULL);
+
+  // Test jacobian computation.
+  EXPECT_TRUE(cost_function.Evaluate(parameter_blocks.data(),
+                                     residuals.data(),
+                                     jacobian.data()));
+
+  for (int r = 0; r < 10; ++r) {
+    EXPECT_EQ(-1.0 * r, residuals.at(r * 2));
+    EXPECT_EQ(+1.0 * r, residuals.at(r * 2 + 1));
+  }
+  EXPECT_EQ(420, residuals.at(20));
+  for (int p = 0; p < 10; ++p) {
+    // Check "A" Jacobian.
+    EXPECT_EQ(-1.0, jacobian_vect[0][2*p * 10 + p]);
+    // Check "B" Jacobian.
+    EXPECT_EQ(+1.0, jacobian_vect[0][(2*p+1) * 10 + p]);
+    jacobian_vect[0][2*p * 10 + p] = 0.0;
+    jacobian_vect[0][(2*p+1) * 10 + p] = 0.0;
+  }
+
+  // Check "C" Jacobian for first parameter block.
+  for (int p = 0; p < 10; ++p) {
+    EXPECT_EQ(4 * p - 8, jacobian_vect[0][20 * 10 + p]);
+    jacobian_vect[0][20 * 10 + p] = 0.0;
+  }
+  for (int i = 0; i < jacobian_vect[0].size(); ++i) {
+    EXPECT_EQ(0.0, jacobian_vect[0][i]);
+  }
+}
+
+// Takes 3 parameter blocks:
+//     parameters[0] (x) is size 1.
+//     parameters[1] (y) is size 2.
+//     parameters[2] (z) is size 3.
+// Emits 7 residuals:
+//     A: x[0] (= sum_x)
+//     B: y[0] + 2.0 * y[1] (= sum_y)
+//     C: z[0] + 3.0 * z[1] + 6.0 * z[2] (= sum_z)
+//     D: sum_x * sum_y
+//     E: sum_y * sum_z
+//     F: sum_x * sum_z
+//     G: sum_x * sum_y * sum_z
+class MyThreeParameterCostFunctor {
+ public:
+  template <typename T>
+  bool operator()(T const* const* parameters, T* residuals) const {
+    const T* x = parameters[0];
+    const T* y = parameters[1];
+    const T* z = parameters[2];
+
+    T sum_x = x[0];
+    T sum_y = y[0] + 2.0 * y[1];
+    T sum_z = z[0] + 3.0 * z[1] + 6.0 * z[2];
+
+    residuals[0] = sum_x;
+    residuals[1] = sum_y;
+    residuals[2] = sum_z;
+    residuals[3] = sum_x * sum_y;
+    residuals[4] = sum_y * sum_z;
+    residuals[5] = sum_x * sum_z;
+    residuals[6] = sum_x * sum_y * sum_z;
+    return true;
+  }
+};
+
+class ThreeParameterCostFunctorTest : public ::testing::Test {
+ protected:
+  virtual void SetUp() {
+    // Prepare the parameters.
+    x_.resize(1);
+    x_[0] = 0.0;
+
+    y_.resize(2);
+    y_[0] = 1.0;
+    y_[1] = 3.0;
+
+    z_.resize(3);
+    z_[0] = 2.0;
+    z_[1] = 4.0;
+    z_[2] = 6.0;
+
+    parameter_blocks_.resize(3);
+    parameter_blocks_[0] = &x_[0];
+    parameter_blocks_[1] = &y_[0];
+    parameter_blocks_[2] = &z_[0];
+
+    // Prepare the cost function.
+    typedef DynamicAutoDiffCostFunction<MyThreeParameterCostFunctor, 3>
+      DynamicMyThreeParameterCostFunction;
+    DynamicMyThreeParameterCostFunction * cost_function =
+      new DynamicMyThreeParameterCostFunction(
+        new MyThreeParameterCostFunctor());
+    cost_function->AddParameterBlock(1);
+    cost_function->AddParameterBlock(2);
+    cost_function->AddParameterBlock(3);
+    cost_function->SetNumResiduals(7);
+
+    cost_function_.reset(cost_function);
+
+    // Setup jacobian data.
+    jacobian_vect_.resize(3);
+    jacobian_vect_[0].resize(7 * x_.size(), -100000);
+    jacobian_vect_[1].resize(7 * y_.size(), -100000);
+    jacobian_vect_[2].resize(7 * z_.size(), -100000);
+
+    // Prepare the expected residuals.
+    const double sum_x = x_[0];
+    const double sum_y = y_[0] + 2.0 * y_[1];
+    const double sum_z = z_[0] + 3.0 * z_[1] + 6.0 * z_[2];
+
+    expected_residuals_.resize(7);
+    expected_residuals_[0] = sum_x;
+    expected_residuals_[1] = sum_y;
+    expected_residuals_[2] = sum_z;
+    expected_residuals_[3] = sum_x * sum_y;
+    expected_residuals_[4] = sum_y * sum_z;
+    expected_residuals_[5] = sum_x * sum_z;
+    expected_residuals_[6] = sum_x * sum_y * sum_z;
+
+    // Prepare the expected jacobian entries.
+    expected_jacobian_x_.resize(7);
+    expected_jacobian_x_[0] = 1.0;
+    expected_jacobian_x_[1] = 0.0;
+    expected_jacobian_x_[2] = 0.0;
+    expected_jacobian_x_[3] = sum_y;
+    expected_jacobian_x_[4] = 0.0;
+    expected_jacobian_x_[5] = sum_z;
+    expected_jacobian_x_[6] = sum_y * sum_z;
+
+    expected_jacobian_y_.resize(14);
+    expected_jacobian_y_[0] = 0.0;
+    expected_jacobian_y_[1] = 0.0;
+    expected_jacobian_y_[2] = 1.0;
+    expected_jacobian_y_[3] = 2.0;
+    expected_jacobian_y_[4] = 0.0;
+    expected_jacobian_y_[5] = 0.0;
+    expected_jacobian_y_[6] = sum_x;
+    expected_jacobian_y_[7] = 2.0 * sum_x;
+    expected_jacobian_y_[8] = sum_z;
+    expected_jacobian_y_[9] = 2.0 * sum_z;
+    expected_jacobian_y_[10] = 0.0;
+    expected_jacobian_y_[11] = 0.0;
+    expected_jacobian_y_[12] = sum_x * sum_z;
+    expected_jacobian_y_[13] = 2.0 * sum_x * sum_z;
+
+    expected_jacobian_z_.resize(21);
+    expected_jacobian_z_[0] = 0.0;
+    expected_jacobian_z_[1] = 0.0;
+    expected_jacobian_z_[2] = 0.0;
+    expected_jacobian_z_[3] = 0.0;
+    expected_jacobian_z_[4] = 0.0;
+    expected_jacobian_z_[5] = 0.0;
+    expected_jacobian_z_[6] = 1.0;
+    expected_jacobian_z_[7] = 3.0;
+    expected_jacobian_z_[8] = 6.0;
+    expected_jacobian_z_[9] = 0.0;
+    expected_jacobian_z_[10] = 0.0;
+    expected_jacobian_z_[11] = 0.0;
+    expected_jacobian_z_[12] = sum_y;
+    expected_jacobian_z_[13] = 3.0 * sum_y;
+    expected_jacobian_z_[14] = 6.0 * sum_y;
+    expected_jacobian_z_[15] = sum_x;
+    expected_jacobian_z_[16] = 3.0 * sum_x;
+    expected_jacobian_z_[17] = 6.0 * sum_x;
+    expected_jacobian_z_[18] = sum_x * sum_y;
+    expected_jacobian_z_[19] = 3.0 * sum_x * sum_y;
+    expected_jacobian_z_[20] = 6.0 * sum_x * sum_y;
+  }
+
+ protected:
+  vector<double> x_;
+  vector<double> y_;
+  vector<double> z_;
+
+  vector<double*> parameter_blocks_;
+
+  std::unique_ptr<CostFunction> cost_function_;
+
+  vector<vector<double>> jacobian_vect_;
+
+  vector<double> expected_residuals_;
+
+  vector<double> expected_jacobian_x_;
+  vector<double> expected_jacobian_y_;
+  vector<double> expected_jacobian_z_;
+};
+
+TEST_F(ThreeParameterCostFunctorTest, TestThreeParameterResiduals) {
+  vector<double> residuals(7, -100000);
+  EXPECT_TRUE(cost_function_->Evaluate(parameter_blocks_.data(),
+                                       residuals.data(),
+                                       NULL));
+  for (int i = 0; i < 7; ++i) {
+    EXPECT_EQ(expected_residuals_[i], residuals[i]);
+  }
+}
+
+TEST_F(ThreeParameterCostFunctorTest, TestThreeParameterJacobian) {
+  vector<double> residuals(7, -100000);
+
+  vector<double*> jacobian;
+  jacobian.push_back(jacobian_vect_[0].data());
+  jacobian.push_back(jacobian_vect_[1].data());
+  jacobian.push_back(jacobian_vect_[2].data());
+
+  EXPECT_TRUE(cost_function_->Evaluate(parameter_blocks_.data(),
+                                       residuals.data(),
+                                       jacobian.data()));
+
+  for (int i = 0; i < 7; ++i) {
+    EXPECT_EQ(expected_residuals_[i], residuals[i]);
+  }
+
+  for (int i = 0; i < 7; ++i) {
+    EXPECT_EQ(expected_jacobian_x_[i], jacobian[0][i]);
+  }
+
+  for (int i = 0; i < 14; ++i) {
+    EXPECT_EQ(expected_jacobian_y_[i], jacobian[1][i]);
+  }
+
+  for (int i = 0; i < 21; ++i) {
+    EXPECT_EQ(expected_jacobian_z_[i], jacobian[2][i]);
+  }
+}
+
+TEST_F(ThreeParameterCostFunctorTest,
+       ThreeParameterJacobianWithFirstAndLastParameterBlockConstant) {
+  vector<double> residuals(7, -100000);
+
+  vector<double*> jacobian;
+  jacobian.push_back(NULL);
+  jacobian.push_back(jacobian_vect_[1].data());
+  jacobian.push_back(NULL);
+
+  EXPECT_TRUE(cost_function_->Evaluate(parameter_blocks_.data(),
+                                       residuals.data(),
+                                       jacobian.data()));
+
+  for (int i = 0; i < 7; ++i) {
+    EXPECT_EQ(expected_residuals_[i], residuals[i]);
+  }
+
+  for (int i = 0; i < 14; ++i) {
+    EXPECT_EQ(expected_jacobian_y_[i], jacobian[1][i]);
+  }
+}
+
+TEST_F(ThreeParameterCostFunctorTest,
+       ThreeParameterJacobianWithSecondParameterBlockConstant) {
+  vector<double> residuals(7, -100000);
+
+  vector<double*> jacobian;
+  jacobian.push_back(jacobian_vect_[0].data());
+  jacobian.push_back(NULL);
+  jacobian.push_back(jacobian_vect_[2].data());
+
+  EXPECT_TRUE(cost_function_->Evaluate(parameter_blocks_.data(),
+                                       residuals.data(),
+                                       jacobian.data()));
+
+  for (int i = 0; i < 7; ++i) {
+    EXPECT_EQ(expected_residuals_[i], residuals[i]);
+  }
+
+  for (int i = 0; i < 7; ++i) {
+    EXPECT_EQ(expected_jacobian_x_[i], jacobian[0][i]);
+  }
+
+  for (int i = 0; i < 21; ++i) {
+    EXPECT_EQ(expected_jacobian_z_[i], jacobian[2][i]);
+  }
+}
+
+// Takes 6 parameter blocks all of size 1:
+//     x0, y0, y1, z0, z1, z2
+// Same 7 residuals as MyThreeParameterCostFunctor.
+// Naming convention for tests is (V)ariable and (C)onstant.
+class MySixParameterCostFunctor {
+ public:
+  template <typename T>
+  bool operator()(T const* const* parameters, T* residuals) const {
+    const T* x0 = parameters[0];
+    const T* y0 = parameters[1];
+    const T* y1 = parameters[2];
+    const T* z0 = parameters[3];
+    const T* z1 = parameters[4];
+    const T* z2 = parameters[5];
+
+    T sum_x = x0[0];
+    T sum_y = y0[0] + 2.0 * y1[0];
+    T sum_z = z0[0] + 3.0 * z1[0] + 6.0 * z2[0];
+
+    residuals[0] = sum_x;
+    residuals[1] = sum_y;
+    residuals[2] = sum_z;
+    residuals[3] = sum_x * sum_y;
+    residuals[4] = sum_y * sum_z;
+    residuals[5] = sum_x * sum_z;
+    residuals[6] = sum_x * sum_y * sum_z;
+    return true;
+  }
+};
+
+class SixParameterCostFunctorTest : public ::testing::Test {
+ protected:
+  virtual void SetUp() {
+    // Prepare the parameters.
+    x0_ = 0.0;
+    y0_ = 1.0;
+    y1_ = 3.0;
+    z0_ = 2.0;
+    z1_ = 4.0;
+    z2_ = 6.0;
+
+    parameter_blocks_.resize(6);
+    parameter_blocks_[0] = &x0_;
+    parameter_blocks_[1] = &y0_;
+    parameter_blocks_[2] = &y1_;
+    parameter_blocks_[3] = &z0_;
+    parameter_blocks_[4] = &z1_;
+    parameter_blocks_[5] = &z2_;
+
+    // Prepare the cost function.
+    typedef DynamicAutoDiffCostFunction<MySixParameterCostFunctor, 3>
+      DynamicMySixParameterCostFunction;
+    DynamicMySixParameterCostFunction * cost_function =
+      new DynamicMySixParameterCostFunction(
+        new MySixParameterCostFunctor());
+    for (int i = 0; i < 6; ++i) {
+      cost_function->AddParameterBlock(1);
+    }
+    cost_function->SetNumResiduals(7);
+
+    cost_function_.reset(cost_function);
+
+    // Setup jacobian data.
+    jacobian_vect_.resize(6);
+    for (int i = 0; i < 6; ++i) {
+      jacobian_vect_[i].resize(7, -100000);
+    }
+
+    // Prepare the expected residuals.
+    const double sum_x = x0_;
+    const double sum_y = y0_ + 2.0 * y1_;
+    const double sum_z = z0_ + 3.0 * z1_ + 6.0 * z2_;
+
+    expected_residuals_.resize(7);
+    expected_residuals_[0] = sum_x;
+    expected_residuals_[1] = sum_y;
+    expected_residuals_[2] = sum_z;
+    expected_residuals_[3] = sum_x * sum_y;
+    expected_residuals_[4] = sum_y * sum_z;
+    expected_residuals_[5] = sum_x * sum_z;
+    expected_residuals_[6] = sum_x * sum_y * sum_z;
+
+    // Prepare the expected jacobian entries.
+    expected_jacobians_.resize(6);
+    expected_jacobians_[0].resize(7);
+    expected_jacobians_[0][0] = 1.0;
+    expected_jacobians_[0][1] = 0.0;
+    expected_jacobians_[0][2] = 0.0;
+    expected_jacobians_[0][3] = sum_y;
+    expected_jacobians_[0][4] = 0.0;
+    expected_jacobians_[0][5] = sum_z;
+    expected_jacobians_[0][6] = sum_y * sum_z;
+
+    expected_jacobians_[1].resize(7);
+    expected_jacobians_[1][0] = 0.0;
+    expected_jacobians_[1][1] = 1.0;
+    expected_jacobians_[1][2] = 0.0;
+    expected_jacobians_[1][3] = sum_x;
+    expected_jacobians_[1][4] = sum_z;
+    expected_jacobians_[1][5] = 0.0;
+    expected_jacobians_[1][6] = sum_x * sum_z;
+
+    expected_jacobians_[2].resize(7);
+    expected_jacobians_[2][0] = 0.0;
+    expected_jacobians_[2][1] = 2.0;
+    expected_jacobians_[2][2] = 0.0;
+    expected_jacobians_[2][3] = 2.0 * sum_x;
+    expected_jacobians_[2][4] = 2.0 * sum_z;
+    expected_jacobians_[2][5] = 0.0;
+    expected_jacobians_[2][6] = 2.0 * sum_x * sum_z;
+
+    expected_jacobians_[3].resize(7);
+    expected_jacobians_[3][0] = 0.0;
+    expected_jacobians_[3][1] = 0.0;
+    expected_jacobians_[3][2] = 1.0;
+    expected_jacobians_[3][3] = 0.0;
+    expected_jacobians_[3][4] = sum_y;
+    expected_jacobians_[3][5] = sum_x;
+    expected_jacobians_[3][6] = sum_x * sum_y;
+
+    expected_jacobians_[4].resize(7);
+    expected_jacobians_[4][0] = 0.0;
+    expected_jacobians_[4][1] = 0.0;
+    expected_jacobians_[4][2] = 3.0;
+    expected_jacobians_[4][3] = 0.0;
+    expected_jacobians_[4][4] = 3.0 * sum_y;
+    expected_jacobians_[4][5] = 3.0 * sum_x;
+    expected_jacobians_[4][6] = 3.0 * sum_x * sum_y;
+
+    expected_jacobians_[5].resize(7);
+    expected_jacobians_[5][0] = 0.0;
+    expected_jacobians_[5][1] = 0.0;
+    expected_jacobians_[5][2] = 6.0;
+    expected_jacobians_[5][3] = 0.0;
+    expected_jacobians_[5][4] = 6.0 * sum_y;
+    expected_jacobians_[5][5] = 6.0 * sum_x;
+    expected_jacobians_[5][6] = 6.0 * sum_x * sum_y;
+  }
+
+ protected:
+  double x0_;
+  double y0_;
+  double y1_;
+  double z0_;
+  double z1_;
+  double z2_;
+
+  vector<double*> parameter_blocks_;
+
+  std::unique_ptr<CostFunction> cost_function_;
+
+  vector<vector<double>> jacobian_vect_;
+
+  vector<double> expected_residuals_;
+  vector<vector<double>> expected_jacobians_;
+};
+
+TEST_F(SixParameterCostFunctorTest, TestSixParameterResiduals) {
+  vector<double> residuals(7, -100000);
+  EXPECT_TRUE(cost_function_->Evaluate(parameter_blocks_.data(),
+                                       residuals.data(),
+                                       NULL));
+  for (int i = 0; i < 7; ++i) {
+    EXPECT_EQ(expected_residuals_[i], residuals[i]);
+  }
+}
+
+TEST_F(SixParameterCostFunctorTest, TestSixParameterJacobian) {
+  vector<double> residuals(7, -100000);
+
+  vector<double*> jacobian;
+  jacobian.push_back(jacobian_vect_[0].data());
+  jacobian.push_back(jacobian_vect_[1].data());
+  jacobian.push_back(jacobian_vect_[2].data());
+  jacobian.push_back(jacobian_vect_[3].data());
+  jacobian.push_back(jacobian_vect_[4].data());
+  jacobian.push_back(jacobian_vect_[5].data());
+
+  EXPECT_TRUE(cost_function_->Evaluate(parameter_blocks_.data(),
+                                       residuals.data(),
+                                       jacobian.data()));
+
+  for (int i = 0; i < 7; ++i) {
+    EXPECT_EQ(expected_residuals_[i], residuals[i]);
+  }
+
+  for (int i = 0; i < 6; ++i) {
+    for (int j = 0; j < 7; ++j) {
+      EXPECT_EQ(expected_jacobians_[i][j], jacobian[i][j]);
+    }
+  }
+}
+
+TEST_F(SixParameterCostFunctorTest, TestSixParameterJacobianVVCVVC) {
+  vector<double> residuals(7, -100000);
+
+  vector<double*> jacobian;
+  jacobian.push_back(jacobian_vect_[0].data());
+  jacobian.push_back(jacobian_vect_[1].data());
+  jacobian.push_back(NULL);
+  jacobian.push_back(jacobian_vect_[3].data());
+  jacobian.push_back(jacobian_vect_[4].data());
+  jacobian.push_back(NULL);
+
+  EXPECT_TRUE(cost_function_->Evaluate(parameter_blocks_.data(),
+                                       residuals.data(),
+                                       jacobian.data()));
+
+  for (int i = 0; i < 7; ++i) {
+    EXPECT_EQ(expected_residuals_[i], residuals[i]);
+  }
+
+  for (int i = 0; i < 6; ++i) {
+    // Skip the constant variables.
+    if (i == 2 || i == 5) {
+      continue;
+    }
+
+    for (int j = 0; j < 7; ++j) {
+      EXPECT_EQ(expected_jacobians_[i][j], jacobian[i][j]);
+    }
+  }
+}
+
+TEST_F(SixParameterCostFunctorTest, TestSixParameterJacobianVCCVCV) {
+  vector<double> residuals(7, -100000);
+
+  vector<double*> jacobian;
+  jacobian.push_back(jacobian_vect_[0].data());
+  jacobian.push_back(NULL);
+  jacobian.push_back(NULL);
+  jacobian.push_back(jacobian_vect_[3].data());
+  jacobian.push_back(NULL);
+  jacobian.push_back(jacobian_vect_[5].data());
+
+  EXPECT_TRUE(cost_function_->Evaluate(parameter_blocks_.data(),
+                                       residuals.data(),
+                                       jacobian.data()));
+
+  for (int i = 0; i < 7; ++i) {
+    EXPECT_EQ(expected_residuals_[i], residuals[i]);
+  }
+
+  for (int i = 0; i < 6; ++i) {
+    // Skip the constant variables.
+    if (i == 1 || i == 2 || i == 4) {
+      continue;
+    }
+
+    for (int j = 0; j < 7; ++j) {
+      EXPECT_EQ(expected_jacobians_[i][j], jacobian[i][j]);
+    }
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
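Note: the tests above exercise DynamicAutoDiffCostFunction::Evaluate() directly.
For reference, a minimal sketch (not part of this patch, assuming only the
public Ceres API) of how such a cost function is typically attached to a
ceres::Problem; the functor, stride, and block sizes are illustrative.

#include <vector>
#include "ceres/ceres.h"

struct MyDynamicFunctor {
  template <typename T>
  bool operator()(T const* const* parameters, T* residuals) const {
    // One residual per entry of the single parameter block.
    for (int i = 0; i < 3; ++i) {
      residuals[i] = parameters[0][i] - T(i);
    }
    return true;
  }
};

void BuildAndSolve() {
  std::vector<double> block(3, 0.0);

  // The stride (4) controls how many derivatives are computed per pass.
  auto* cost_function =
      new ceres::DynamicAutoDiffCostFunction<MyDynamicFunctor, 4>(
          new MyDynamicFunctor());
  cost_function->AddParameterBlock(block.size());
  cost_function->SetNumResiduals(3);

  ceres::Problem problem;
  std::vector<double*> parameter_blocks(1, block.data());
  problem.AddResidualBlock(cost_function, nullptr, parameter_blocks);

  ceres::Solver::Options options;
  ceres::Solver::Summary summary;
  ceres::Solve(options, &problem, &summary);
}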
diff --git a/internal/ceres/dynamic_compressed_row_finalizer.h b/internal/ceres/dynamic_compressed_row_finalizer.h
new file mode 100644
index 0000000..a25a308
--- /dev/null
+++ b/internal/ceres/dynamic_compressed_row_finalizer.h
@@ -0,0 +1,51 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: richie.stebbing@gmail.com (Richard Stebbing)
+
+#ifndef CERES_INTERNAL_DYNAMIC_COMPRESED_ROW_FINALIZER_H_
+#define CERES_INTERNAL_DYNAMIC_COMPRESED_ROW_FINALIZER_H_
+
+#include "ceres/casts.h"
+#include "ceres/dynamic_compressed_row_sparse_matrix.h"
+
+namespace ceres {
+namespace internal {
+
+struct DynamicCompressedRowJacobianFinalizer {
+  void operator()(SparseMatrix* base_jacobian, int num_parameters) {
+    DynamicCompressedRowSparseMatrix* jacobian =
+      down_cast<DynamicCompressedRowSparseMatrix*>(base_jacobian);
+    jacobian->Finalize(num_parameters);
+  }
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_DYNAMIC_COMPRESED_ROW_FINALIZER_H_
diff --git a/internal/ceres/dynamic_compressed_row_jacobian_writer.cc b/internal/ceres/dynamic_compressed_row_jacobian_writer.cc
new file mode 100644
index 0000000..acc372a
--- /dev/null
+++ b/internal/ceres/dynamic_compressed_row_jacobian_writer.cc
@@ -0,0 +1,102 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: richie.stebbing@gmail.com (Richard Stebbing)
+
+#include "ceres/dynamic_compressed_row_jacobian_writer.h"
+#include "ceres/casts.h"
+#include "ceres/compressed_row_jacobian_writer.h"
+#include "ceres/dynamic_compressed_row_sparse_matrix.h"
+#include "ceres/parameter_block.h"
+#include "ceres/program.h"
+#include "ceres/residual_block.h"
+
+namespace ceres {
+namespace internal {
+
+using std::pair;
+using std::vector;
+
+ScratchEvaluatePreparer*
+DynamicCompressedRowJacobianWriter::CreateEvaluatePreparers(int num_threads) {
+  return ScratchEvaluatePreparer::Create(*program_, num_threads);
+}
+
+SparseMatrix* DynamicCompressedRowJacobianWriter::CreateJacobian() const {
+  DynamicCompressedRowSparseMatrix* jacobian =
+      new DynamicCompressedRowSparseMatrix(program_->NumResiduals(),
+                                           program_->NumEffectiveParameters(),
+                                           0 /* max_num_nonzeros */);
+  return jacobian;
+}
+
+void DynamicCompressedRowJacobianWriter::Write(int residual_id,
+                                               int residual_offset,
+                                               double** jacobians,
+                                               SparseMatrix* base_jacobian) {
+  DynamicCompressedRowSparseMatrix* jacobian =
+      down_cast<DynamicCompressedRowSparseMatrix*>(base_jacobian);
+
+  // Get the `residual_block` of interest.
+  const ResidualBlock* residual_block =
+      program_->residual_blocks()[residual_id];
+  const int num_residuals = residual_block->NumResiduals();
+
+  vector<pair<int, int>> evaluated_jacobian_blocks;
+  CompressedRowJacobianWriter::GetOrderedParameterBlocks(
+      program_, residual_id, &evaluated_jacobian_blocks);
+
+  // `residual_offset` is the residual row in the global jacobian.
+  // Empty the jacobian rows.
+  jacobian->ClearRows(residual_offset, num_residuals);
+
+  // Iterate over each parameter block.
+  for (int i = 0; i < evaluated_jacobian_blocks.size(); ++i) {
+    const ParameterBlock* parameter_block =
+        program_->parameter_blocks()[evaluated_jacobian_blocks[i].first];
+    const int parameter_block_jacobian_index =
+        evaluated_jacobian_blocks[i].second;
+    const int parameter_block_size = parameter_block->LocalSize();
+
+    // For each parameter block only insert its non-zero entries.
+    for (int r = 0; r < num_residuals; ++r) {
+      for (int c = 0; c < parameter_block_size; ++c) {
+        const double& v = jacobians[parameter_block_jacobian_index]
+                                   [r * parameter_block_size + c];
+        // Only insert non-zero entries.
+        if (v != 0.0) {
+          jacobian->InsertEntry(
+              residual_offset + r, parameter_block->delta_offset() + c, v);
+        }
+      }
+    }
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/dynamic_compressed_row_jacobian_writer.h b/internal/ceres/dynamic_compressed_row_jacobian_writer.h
new file mode 100644
index 0000000..6e5ac38
--- /dev/null
+++ b/internal/ceres/dynamic_compressed_row_jacobian_writer.h
@@ -0,0 +1,83 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: richie.stebbing@gmail.com (Richard Stebbing)
+//
+// A jacobian writer that directly writes to dynamic compressed row sparse
+// matrices.
+
+#ifndef CERES_INTERNAL_DYNAMIC_COMPRESSED_ROW_JACOBIAN_WRITER_H_
+#define CERES_INTERNAL_DYNAMIC_COMPRESSED_ROW_JACOBIAN_WRITER_H_
+
+#include "ceres/evaluator.h"
+#include "ceres/scratch_evaluate_preparer.h"
+
+namespace ceres {
+namespace internal {
+
+class Program;
+class SparseMatrix;
+
+class DynamicCompressedRowJacobianWriter {
+ public:
+  DynamicCompressedRowJacobianWriter(Evaluator::Options /* ignored */,
+                                     Program* program)
+    : program_(program) {
+  }
+
+  // JacobianWriter interface.
+
+  // The compressed row matrix has a different layout than that assumed by
+  // the cost functions. The scratch space is therefore used to store
+  // the jacobians (including zeros) temporarily before only the non-zero
+  // entries are copied over to the larger jacobian in `Write`.
+  ScratchEvaluatePreparer* CreateEvaluatePreparers(int num_threads);
+
+  // Return a `DynamicCompressedRowSparseMatrix` which is filled by
+  // `Write`. Note that `Finalize` must be called to make the
+  // `CompressedRowSparseMatrix` interface valid.
+  SparseMatrix* CreateJacobian() const;
+
+  // Write only the non-zero jacobian entries for a residual block
+  // (specified by `residual_id`) into `base_jacobian`, starting at the row
+  // specified by `residual_offset`.
+  //
+  // This method is thread-safe over residual blocks (each `residual_id`).
+  void Write(int residual_id,
+             int residual_offset,
+             double **jacobians,
+             SparseMatrix* base_jacobian);
+
+ private:
+  Program* program_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_DYNAMIC_COMPRESSED_ROW_JACOBIAN_WRITER_H_
diff --git a/internal/ceres/dynamic_compressed_row_sparse_matrix.cc b/internal/ceres/dynamic_compressed_row_sparse_matrix.cc
new file mode 100644
index 0000000..f020768
--- /dev/null
+++ b/internal/ceres/dynamic_compressed_row_sparse_matrix.cc
@@ -0,0 +1,107 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: richie.stebbing@gmail.com (Richard Stebbing)
+
+#include <cstring>
+#include "ceres/dynamic_compressed_row_sparse_matrix.h"
+
+namespace ceres {
+namespace internal {
+
+DynamicCompressedRowSparseMatrix::DynamicCompressedRowSparseMatrix(
+  int num_rows,
+  int num_cols,
+  int initial_max_num_nonzeros)
+    : CompressedRowSparseMatrix(num_rows,
+                                num_cols,
+                                initial_max_num_nonzeros) {
+    dynamic_cols_.resize(num_rows);
+    dynamic_values_.resize(num_rows);
+  }
+
+void DynamicCompressedRowSparseMatrix::InsertEntry(int row,
+                                                   int col,
+                                                   const double& value) {
+  CHECK_GE(row, 0);
+  CHECK_LT(row, num_rows());
+  CHECK_GE(col, 0);
+  CHECK_LT(col, num_cols());
+  dynamic_cols_[row].push_back(col);
+  dynamic_values_[row].push_back(value);
+}
+
+void DynamicCompressedRowSparseMatrix::ClearRows(int row_start,
+                                                 int num_rows) {
+  for (int r = 0; r < num_rows; ++r) {
+    const int i = row_start + r;
+    CHECK_GE(i, 0);
+    CHECK_LT(i, this->num_rows());
+    dynamic_cols_[i].resize(0);
+    dynamic_values_[i].resize(0);
+  }
+}
+
+void DynamicCompressedRowSparseMatrix::Finalize(int num_additional_elements) {
+  // `num_additional_elements` is provided as an argument so that additional
+  // storage can be reserved when it is known by the finalizer.
+  CHECK_GE(num_additional_elements, 0);
+
+  // Count the number of non-zeros and resize `cols_` and `values_`.
+  int num_jacobian_nonzeros = 0;
+  for (int i = 0; i < dynamic_cols_.size(); ++i) {
+    num_jacobian_nonzeros += dynamic_cols_[i].size();
+  }
+
+  SetMaxNumNonZeros(num_jacobian_nonzeros + num_additional_elements);
+
+  // Flatten `dynamic_cols_` into `cols_` and `dynamic_values_`
+  // into `values_`.
+  int index_into_values_and_cols = 0;
+  for (int i = 0; i < num_rows(); ++i) {
+    mutable_rows()[i] = index_into_values_and_cols;
+    const int num_nonzero_columns = dynamic_cols_[i].size();
+    if (num_nonzero_columns > 0) {
+      memcpy(mutable_cols() + index_into_values_and_cols,
+             &dynamic_cols_[i][0],
+             dynamic_cols_[i].size() * sizeof(dynamic_cols_[0][0]));
+      memcpy(mutable_values() + index_into_values_and_cols,
+             &dynamic_values_[i][0],
+             dynamic_values_[i].size() * sizeof(dynamic_values_[0][0]));
+      index_into_values_and_cols += dynamic_cols_[i].size();
+    }
+  }
+  mutable_rows()[num_rows()] = index_into_values_and_cols;
+
+  CHECK_EQ(index_into_values_and_cols, num_jacobian_nonzeros)
+    << "Ceres bug: final index into values_ and cols_ should be equal to "
+    << "the number of jacobian nonzeros. Please contact the developers!";
+}
+
+}  // namespace internal
+}  // namespace ceres
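The Finalize() above flattens the per-row scratch buffers (dynamic_cols_,
dynamic_values_) into the standard compressed-row arrays. A standalone
illustration of the same flattening, using only std::vector (no Ceres types;
names are illustrative):

#include <cassert>
#include <cstdio>
#include <vector>

int main() {
  // Per-row scratch buffers, as built up by InsertEntry():
  //   row 0: (col 1, 2.0); row 1: empty; row 2: (col 0, 3.0), (col 2, 4.0).
  std::vector<std::vector<int>> dynamic_cols = {{1}, {}, {0, 2}};
  std::vector<std::vector<double>> dynamic_values = {{2.0}, {}, {3.0, 4.0}};

  const int num_rows = dynamic_cols.size();
  std::vector<int> rows(num_rows + 1, 0);
  std::vector<int> cols;
  std::vector<double> values;

  // rows[i] holds the index of the first non-zero of row i; rows[num_rows]
  // is the total number of non-zeros.
  for (int i = 0; i < num_rows; ++i) {
    rows[i] = static_cast<int>(cols.size());
    cols.insert(cols.end(), dynamic_cols[i].begin(), dynamic_cols[i].end());
    values.insert(values.end(),
                  dynamic_values[i].begin(), dynamic_values[i].end());
  }
  rows[num_rows] = static_cast<int>(cols.size());

  // Expected result: rows = [0, 1, 1, 3], cols = [1, 0, 2],
  // values = [2.0, 3.0, 4.0].
  assert(rows[num_rows] == static_cast<int>(values.size()));
  for (int i = 0; i <= num_rows; ++i) {
    printf("rows[%d] = %d\n", i, rows[i]);
  }
  return 0;
}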
diff --git a/internal/ceres/dynamic_compressed_row_sparse_matrix.h b/internal/ceres/dynamic_compressed_row_sparse_matrix.h
new file mode 100644
index 0000000..ad41da7
--- /dev/null
+++ b/internal/ceres/dynamic_compressed_row_sparse_matrix.h
@@ -0,0 +1,101 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: richie.stebbing@gmail.com (Richard Stebbing)
+//
+// A compressed row sparse matrix that provides an extended interface to
+// allow dynamic insertion of entries. This is provided for the use case
+// where the sparsity structure and the number of non-zero entries are dynamic.
+// This flexibility is achieved by using an (internal) scratch space that
+// allows independent insertion of entries into each row (thread-safe).
+// Once insertion is complete, the `Finalize` method must be called to ensure
+// that the underlying `CompressedRowSparseMatrix` is consistent.
+//
+// This should only be used if you really do need a dynamic sparsity pattern.
+
+#ifndef CERES_INTERNAL_DYNAMIC_COMPRESSED_ROW_SPARSE_MATRIX_H_
+#define CERES_INTERNAL_DYNAMIC_COMPRESSED_ROW_SPARSE_MATRIX_H_
+
+#include <vector>
+
+#include "ceres/compressed_row_sparse_matrix.h"
+
+namespace ceres {
+namespace internal {
+
+class DynamicCompressedRowSparseMatrix : public CompressedRowSparseMatrix {
+ public:
+  // Set the number of rows and columns for the underlying
+  // `CompressedRowSparseMatrix` and set the initial maximum number of non-zero
+  // entries. Note that once entries have been inserted and `Finalize`
+  // is called, the number of non-zeros is determined and all internal
+  // structures are adjusted as required. If you know the upper limit on the
+  // number of non-zeros, then passing this value here can prevent future
+  // memory reallocations, which may improve performance. Otherwise, if no
+  // upper limit is available, a value of 0 is sufficient.
+  //
+  // Typical usage of this class is to define a new instance with a given
+  // number of rows, columns and maximum number of non-zero elements
+  // (if available). Next, entries are inserted at row and column positions
+  // using `InsertEntry`. Finally, once all elements have been inserted,
+  // `Finalize` must be called to make the underlying
+  // `CompressedRowSparseMatrix` consistent.
+  DynamicCompressedRowSparseMatrix(int num_rows,
+                                   int num_cols,
+                                   int initial_max_num_nonzeros);
+
+  // Insert an entry at a given row and column position. This method is
+  // thread-safe across rows, i.e. different threads can insert values
+  // simultaneously into different rows. It should be emphasised that this
+  // method always inserts a new entry and does not check for existing
+  // entries at the specified row and column position. Duplicate entries
+  // for a given row and column position will result in undefined
+  // behavior.
+  void InsertEntry(int row, int col, const double& value);
+
+  // Clear all entries for rows, starting from row index `row_start`
+  // and proceeding for `num_rows`.
+  void ClearRows(int row_start, int num_rows);
+
+  // Make the underlying internal `CompressedRowSparseMatrix` data structures
+  // consistent. Additional space for non-zero entries in the
+  // `CompressedRowSparseMatrix` can be reserved by specifying
+  // `num_additional_elements`. This is useful when it is known that rows will
+  // be appended to the `CompressedRowSparseMatrix` (e.g. appending a diagonal
+  // matrix to the jacobian) as it prevents the need for future reallocation.
+  void Finalize(int num_additional_elements);
+
+ private:
+  std::vector<std::vector<int>> dynamic_cols_;
+  std::vector<std::vector<double>> dynamic_values_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_DYNAMIC_COMPRESSED_ROW_SPARSE_MATRIX_H_
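To make the "typical usage" described in the class comment concrete, here is a
minimal sketch of the construct -> InsertEntry -> Finalize sequence. It assumes
compilation inside the Ceres internal tree (namespace ceres::internal), as in
the unit test that follows; the function name and values are illustrative.

#include "ceres/dynamic_compressed_row_sparse_matrix.h"
#include "ceres/internal/eigen.h"

namespace ceres {
namespace internal {

void BuildSmallDynamicMatrix() {
  // 3x4 matrix; pass 0 when no upper bound on the non-zeros is known.
  DynamicCompressedRowSparseMatrix m(3, 4, 0);

  // Entries may be inserted independently per row (thread-safe across rows).
  m.InsertEntry(0, 1, 2.0);
  m.InsertEntry(2, 0, 3.0);
  m.InsertEntry(2, 2, 4.0);

  // Make the underlying CompressedRowSparseMatrix consistent; no additional
  // space is reserved here.
  m.Finalize(0);

  Matrix dense;
  m.ToDenseMatrix(&dense);  // dense(0, 1) == 2.0, dense(2, 0) == 3.0, etc.
}

}  // namespace internal
}  // namespace ceres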
diff --git a/internal/ceres/dynamic_compressed_row_sparse_matrix_test.cc b/internal/ceres/dynamic_compressed_row_sparse_matrix_test.cc
new file mode 100644
index 0000000..3592557
--- /dev/null
+++ b/internal/ceres/dynamic_compressed_row_sparse_matrix_test.cc
@@ -0,0 +1,219 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: richie.stebbing@gmail.com (Richard Stebbing)
+
+#include "ceres/dynamic_compressed_row_sparse_matrix.h"
+
+#include <memory>
+#include "ceres/casts.h"
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/linear_least_squares_problems.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+using std::copy;
+using std::vector;
+
+class DynamicCompressedRowSparseMatrixTest : public ::testing::Test {
+ protected:
+  virtual void SetUp() {
+    num_rows = 7;
+    num_cols = 4;
+
+    // The number of additional elements reserved when `Finalize` is called
+    // should have no effect on the number of rows, columns or nonzeros.
+    // Set this to some nonzero value to be sure.
+    num_additional_elements = 13;
+
+    expected_num_nonzeros = num_rows * num_cols - std::min(num_rows, num_cols);
+
+    InitialiseDenseReference();
+    InitialiseSparseMatrixReferences();
+
+    dcrsm.reset(new DynamicCompressedRowSparseMatrix(num_rows,
+                                                     num_cols,
+                                                     0));
+  }
+
+  void Finalize() {
+    dcrsm->Finalize(num_additional_elements);
+  }
+
+  void InitialiseDenseReference() {
+    dense.resize(num_rows, num_cols);
+    dense.setZero();
+    int num_nonzeros = 0;
+    for (int i = 0; i < (num_rows * num_cols); ++i) {
+      const int r = i / num_cols, c = i % num_cols;
+      if (r != c) {
+        dense(r, c) = i + 1;
+        ++num_nonzeros;
+      }
+    }
+    ASSERT_EQ(num_nonzeros, expected_num_nonzeros);
+  }
+
+  void InitialiseSparseMatrixReferences() {
+    vector<int> rows, cols;
+    vector<double> values;
+    for (int i = 0; i < (num_rows * num_cols); ++i) {
+      const int r = i / num_cols, c = i % num_cols;
+      if (r != c) {
+        rows.push_back(r);
+        cols.push_back(c);
+        values.push_back(i + 1);
+      }
+    }
+    ASSERT_EQ(values.size(), expected_num_nonzeros);
+
+    tsm.reset(new TripletSparseMatrix(num_rows,
+                                      num_cols,
+                                      expected_num_nonzeros));
+    copy(rows.begin(), rows.end(), tsm->mutable_rows());
+    copy(cols.begin(), cols.end(), tsm->mutable_cols());
+    copy(values.begin(), values.end(), tsm->mutable_values());
+    tsm->set_num_nonzeros(values.size());
+
+    Matrix dense_from_tsm;
+    tsm->ToDenseMatrix(&dense_from_tsm);
+    ASSERT_TRUE((dense.array() == dense_from_tsm.array()).all());
+
+    crsm.reset(CompressedRowSparseMatrix::FromTripletSparseMatrix(*tsm));
+    Matrix dense_from_crsm;
+    crsm->ToDenseMatrix(&dense_from_crsm);
+    ASSERT_TRUE((dense.array() == dense_from_crsm.array()).all());
+  }
+
+  void InsertNonZeroEntriesFromDenseReference() {
+    for (int r = 0; r < num_rows; ++r) {
+      for (int c = 0; c < num_cols; ++c) {
+        const double& v = dense(r, c);
+        if (v != 0.0) {
+          dcrsm->InsertEntry(r, c, v);
+        }
+      }
+    }
+  }
+
+  void ExpectEmpty() {
+    EXPECT_EQ(dcrsm->num_rows(), num_rows);
+    EXPECT_EQ(dcrsm->num_cols(), num_cols);
+    EXPECT_EQ(dcrsm->num_nonzeros(), 0);
+
+    Matrix dense_from_dcrsm;
+    dcrsm->ToDenseMatrix(&dense_from_dcrsm);
+    EXPECT_EQ(dense_from_dcrsm.rows(), num_rows);
+    EXPECT_EQ(dense_from_dcrsm.cols(), num_cols);
+    EXPECT_TRUE((dense_from_dcrsm.array() == 0.0).all());
+  }
+
+  void ExpectEqualToDenseReference() {
+    Matrix dense_from_dcrsm;
+    dcrsm->ToDenseMatrix(&dense_from_dcrsm);
+    EXPECT_TRUE((dense.array() == dense_from_dcrsm.array()).all());
+  }
+
+  void ExpectEqualToCompressedRowSparseMatrixReference() {
+    typedef Eigen::Map<const Eigen::VectorXi> ConstIntVectorRef;
+
+    ConstIntVectorRef crsm_rows(crsm->rows(), crsm->num_rows() + 1);
+    ConstIntVectorRef dcrsm_rows(dcrsm->rows(), dcrsm->num_rows() + 1);
+    EXPECT_TRUE((crsm_rows.array() == dcrsm_rows.array()).all());
+
+    ConstIntVectorRef crsm_cols(crsm->cols(), crsm->num_nonzeros());
+    ConstIntVectorRef dcrsm_cols(dcrsm->cols(), dcrsm->num_nonzeros());
+    EXPECT_TRUE((crsm_cols.array() == dcrsm_cols.array()).all());
+
+    ConstVectorRef crsm_values(crsm->values(), crsm->num_nonzeros());
+    ConstVectorRef dcrsm_values(dcrsm->values(), dcrsm->num_nonzeros());
+    EXPECT_TRUE((crsm_values.array() == dcrsm_values.array()).all());
+  }
+
+  int num_rows;
+  int num_cols;
+
+  int num_additional_elements;
+
+  int expected_num_nonzeros;
+
+  Matrix dense;
+  std::unique_ptr<TripletSparseMatrix> tsm;
+  std::unique_ptr<CompressedRowSparseMatrix> crsm;
+
+  std::unique_ptr<DynamicCompressedRowSparseMatrix> dcrsm;
+};
+
+TEST_F(DynamicCompressedRowSparseMatrixTest, Initialization) {
+  ExpectEmpty();
+
+  Finalize();
+  ExpectEmpty();
+}
+
+TEST_F(DynamicCompressedRowSparseMatrixTest, InsertEntryAndFinalize) {
+  InsertNonZeroEntriesFromDenseReference();
+  ExpectEmpty();
+
+  Finalize();
+  ExpectEqualToDenseReference();
+  ExpectEqualToCompressedRowSparseMatrixReference();
+}
+
+TEST_F(DynamicCompressedRowSparseMatrixTest, ClearRows) {
+  InsertNonZeroEntriesFromDenseReference();
+  Finalize();
+  ExpectEqualToDenseReference();
+  ExpectEqualToCompressedRowSparseMatrixReference();
+
+  dcrsm->ClearRows(0, 0);
+  Finalize();
+  ExpectEqualToDenseReference();
+  ExpectEqualToCompressedRowSparseMatrixReference();
+
+  dcrsm->ClearRows(0, num_rows);
+  ExpectEqualToCompressedRowSparseMatrixReference();
+
+  Finalize();
+  ExpectEmpty();
+
+  InsertNonZeroEntriesFromDenseReference();
+  dcrsm->ClearRows(1, 2);
+  Finalize();
+  dense.block(1, 0, 2, num_cols).setZero();
+  ExpectEqualToDenseReference();
+
+  InitialiseDenseReference();
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/dynamic_numeric_diff_cost_function_test.cc b/internal/ceres/dynamic_numeric_diff_cost_function_test.cc
new file mode 100644
index 0000000..b627eb7
--- /dev/null
+++ b/internal/ceres/dynamic_numeric_diff_cost_function_test.cc
@@ -0,0 +1,521 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//         mierle@gmail.com (Keir Mierle)
+
+#include <cstddef>
+
+#include <memory>
+#include "ceres/dynamic_numeric_diff_cost_function.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+using std::vector;
+
+const double kTolerance = 1e-6;
+
+// Takes 2 parameter blocks:
+//     parameters[0] is size 10.
+//     parameters[1] is size 5.
+// Emits 21 residuals:
+//     A: i - parameters[0][i], for i in [0,10)  -- this is 10 residuals
+//     B: parameters[0][i] - i, for i in [0,10)  -- this is another 10.
+//     C: sum(parameters[0][i]^2 - 8*parameters[0][i]) + sum(parameters[1][i])
+class MyCostFunctor {
+ public:
+  bool operator()(double const* const* parameters, double* residuals) const {
+    const double* params0 = parameters[0];
+    int r = 0;
+    for (int i = 0; i < 10; ++i) {
+      residuals[r++] = i - params0[i];
+      residuals[r++] = params0[i] - i;
+    }
+
+    double c_residual = 0.0;
+    for (int i = 0; i < 10; ++i) {
+      c_residual += pow(params0[i], 2) - 8.0 * params0[i];
+    }
+
+    const double* params1 = parameters[1];
+    for (int i = 0; i < 5; ++i) {
+      c_residual += params1[i];
+    }
+    residuals[r++] = c_residual;
+    return true;
+  }
+};
+
+TEST(DynamicNumericdiffCostFunctionTest, TestResiduals) {
+  vector<double> param_block_0(10, 0.0);
+  vector<double> param_block_1(5, 0.0);
+  DynamicNumericDiffCostFunction<MyCostFunctor> cost_function(
+      new MyCostFunctor());
+  cost_function.AddParameterBlock(param_block_0.size());
+  cost_function.AddParameterBlock(param_block_1.size());
+  cost_function.SetNumResiduals(21);
+
+  // Test residual computation.
+  vector<double> residuals(21, -100000);
+  vector<double*> parameter_blocks(2);
+  parameter_blocks[0] = &param_block_0[0];
+  parameter_blocks[1] = &param_block_1[0];
+  EXPECT_TRUE(cost_function.Evaluate(&parameter_blocks[0],
+                                     residuals.data(),
+                                     NULL));
+  for (int r = 0; r < 10; ++r) {
+    EXPECT_EQ(1.0 * r, residuals.at(r * 2));
+    EXPECT_EQ(-1.0 * r, residuals.at(r * 2 + 1));
+  }
+  EXPECT_EQ(0, residuals.at(20));
+}
+
+
+TEST(DynamicNumericdiffCostFunctionTest, TestJacobian) {
+  // Test the residual counting.
+  vector<double> param_block_0(10, 0.0);
+  for (int i = 0; i < 10; ++i) {
+    param_block_0[i] = 2 * i;
+  }
+  vector<double> param_block_1(5, 0.0);
+  DynamicNumericDiffCostFunction<MyCostFunctor> cost_function(
+      new MyCostFunctor());
+  cost_function.AddParameterBlock(param_block_0.size());
+  cost_function.AddParameterBlock(param_block_1.size());
+  cost_function.SetNumResiduals(21);
+
+  // Prepare the residuals.
+  vector<double> residuals(21, -100000);
+
+  // Prepare the parameters.
+  vector<double*> parameter_blocks(2);
+  parameter_blocks[0] = &param_block_0[0];
+  parameter_blocks[1] = &param_block_1[0];
+
+  // Prepare the jacobian.
+  vector<vector<double>> jacobian_vect(2);
+  jacobian_vect[0].resize(21 * 10, -100000);
+  jacobian_vect[1].resize(21 * 5, -100000);
+  vector<double*> jacobian;
+  jacobian.push_back(jacobian_vect[0].data());
+  jacobian.push_back(jacobian_vect[1].data());
+
+  // Test jacobian computation.
+  EXPECT_TRUE(cost_function.Evaluate(parameter_blocks.data(),
+                                     residuals.data(),
+                                     jacobian.data()));
+
+  for (int r = 0; r < 10; ++r) {
+    EXPECT_EQ(-1.0 * r, residuals.at(r * 2));
+    EXPECT_EQ(+1.0 * r, residuals.at(r * 2 + 1));
+  }
+  EXPECT_EQ(420, residuals.at(20));
+  for (int p = 0; p < 10; ++p) {
+    // Check "A" Jacobian.
+    EXPECT_NEAR(-1.0, jacobian_vect[0][2*p * 10 + p], kTolerance);
+    // Check "B" Jacobian.
+    EXPECT_NEAR(+1.0, jacobian_vect[0][(2*p+1) * 10 + p], kTolerance);
+    jacobian_vect[0][2*p * 10 + p] = 0.0;
+    jacobian_vect[0][(2*p+1) * 10 + p] = 0.0;
+  }
+
+  // Check "C" Jacobian for first parameter block.
+  for (int p = 0; p < 10; ++p) {
+    EXPECT_NEAR(4 * p - 8, jacobian_vect[0][20 * 10 + p], kTolerance);
+    jacobian_vect[0][20 * 10 + p] = 0.0;
+  }
+  for (int i = 0; i < jacobian_vect[0].size(); ++i) {
+    EXPECT_NEAR(0.0, jacobian_vect[0][i], kTolerance);
+  }
+
+  // Check "C" Jacobian for second parameter block.
+  for (int p = 0; p < 5; ++p) {
+    EXPECT_NEAR(1.0, jacobian_vect[1][20 * 5 + p], kTolerance);
+    jacobian_vect[1][20 * 5 + p] = 0.0;
+  }
+  for (int i = 0; i < jacobian_vect[1].size(); ++i) {
+    EXPECT_NEAR(0.0, jacobian_vect[1][i], kTolerance);
+  }
+}
+
+TEST(DynamicNumericdiffCostFunctionTest, JacobianWithFirstParameterBlockConstant) {  // NOLINT
+  // Set up the parameter values.
+  vector<double> param_block_0(10, 0.0);
+  for (int i = 0; i < 10; ++i) {
+    param_block_0[i] = 2 * i;
+  }
+  vector<double> param_block_1(5, 0.0);
+  DynamicNumericDiffCostFunction<MyCostFunctor> cost_function(
+      new MyCostFunctor());
+  cost_function.AddParameterBlock(param_block_0.size());
+  cost_function.AddParameterBlock(param_block_1.size());
+  cost_function.SetNumResiduals(21);
+
+  // Prepare the residuals.
+  vector<double> residuals(21, -100000);
+
+  // Prepare the parameters.
+  vector<double*> parameter_blocks(2);
+  parameter_blocks[0] = &param_block_0[0];
+  parameter_blocks[1] = &param_block_1[0];
+
+  // Prepare the jacobian.
+  vector<vector<double>> jacobian_vect(2);
+  jacobian_vect[0].resize(21 * 10, -100000);
+  jacobian_vect[1].resize(21 * 5, -100000);
+  vector<double*> jacobian;
+  jacobian.push_back(NULL);
+  jacobian.push_back(jacobian_vect[1].data());
+
+  // Test jacobian computation.
+  EXPECT_TRUE(cost_function.Evaluate(parameter_blocks.data(),
+                                     residuals.data(),
+                                     jacobian.data()));
+
+  for (int r = 0; r < 10; ++r) {
+    EXPECT_EQ(-1.0 * r, residuals.at(r * 2));
+    EXPECT_EQ(+1.0 * r, residuals.at(r * 2 + 1));
+  }
+  EXPECT_EQ(420, residuals.at(20));
+
+  // Check "C" Jacobian for second parameter block.
+  for (int p = 0; p < 5; ++p) {
+    EXPECT_NEAR(1.0, jacobian_vect[1][20 * 5 + p], kTolerance);
+    jacobian_vect[1][20 * 5 + p] = 0.0;
+  }
+  for (int i = 0; i < jacobian_vect[1].size(); ++i) {
+    EXPECT_EQ(0.0, jacobian_vect[1][i]);
+  }
+}
+
+TEST(DynamicNumericdiffCostFunctionTest, JacobianWithSecondParameterBlockConstant) {  // NOLINT
+  // Set up the parameter values.
+  vector<double> param_block_0(10, 0.0);
+  for (int i = 0; i < 10; ++i) {
+    param_block_0[i] = 2 * i;
+  }
+  vector<double> param_block_1(5, 0.0);
+  DynamicNumericDiffCostFunction<MyCostFunctor> cost_function(
+      new MyCostFunctor());
+  cost_function.AddParameterBlock(param_block_0.size());
+  cost_function.AddParameterBlock(param_block_1.size());
+  cost_function.SetNumResiduals(21);
+
+  // Prepare the residuals.
+  vector<double> residuals(21, -100000);
+
+  // Prepare the parameters.
+  vector<double*> parameter_blocks(2);
+  parameter_blocks[0] = &param_block_0[0];
+  parameter_blocks[1] = &param_block_1[0];
+
+  // Prepare the jacobian.
+  vector<vector<double>> jacobian_vect(2);
+  jacobian_vect[0].resize(21 * 10, -100000);
+  jacobian_vect[1].resize(21 * 5, -100000);
+  vector<double*> jacobian;
+  jacobian.push_back(jacobian_vect[0].data());
+  jacobian.push_back(NULL);
+
+  // Test jacobian computation.
+  EXPECT_TRUE(cost_function.Evaluate(parameter_blocks.data(),
+                                     residuals.data(),
+                                     jacobian.data()));
+
+  for (int r = 0; r < 10; ++r) {
+    EXPECT_EQ(-1.0 * r, residuals.at(r * 2));
+    EXPECT_EQ(+1.0 * r, residuals.at(r * 2 + 1));
+  }
+  EXPECT_EQ(420, residuals.at(20));
+  for (int p = 0; p < 10; ++p) {
+    // Check "A" Jacobian.
+    EXPECT_NEAR(-1.0, jacobian_vect[0][2*p * 10 + p], kTolerance);
+    // Check "B" Jacobian.
+    EXPECT_NEAR(+1.0, jacobian_vect[0][(2*p+1) * 10 + p], kTolerance);
+    jacobian_vect[0][2*p * 10 + p] = 0.0;
+    jacobian_vect[0][(2*p+1) * 10 + p] = 0.0;
+  }
+
+  // Check "C" Jacobian for first parameter block.
+  for (int p = 0; p < 10; ++p) {
+    EXPECT_NEAR(4 * p - 8, jacobian_vect[0][20 * 10 + p], kTolerance);
+    jacobian_vect[0][20 * 10 + p] = 0.0;
+  }
+  for (int i = 0; i < jacobian_vect[0].size(); ++i) {
+    EXPECT_EQ(0.0, jacobian_vect[0][i]);
+  }
+}
+
+// Takes 3 parameter blocks:
+//     parameters[0] (x) is size 1.
+//     parameters[1] (y) is size 2.
+//     parameters[2] (z) is size 3.
+// Emits 7 residuals:
+//     A: x[0] (= sum_x)
+//     B: y[0] + 2.0 * y[1] (= sum_y)
+//     C: z[0] + 3.0 * z[1] + 6.0 * z[2] (= sum_z)
+//     D: sum_x * sum_y
+//     E: sum_y * sum_z
+//     F: sum_x * sum_z
+//     G: sum_x * sum_y * sum_z
+class MyThreeParameterCostFunctor {
+ public:
+  template <typename T>
+  bool operator()(T const* const* parameters, T* residuals) const {
+    const T* x = parameters[0];
+    const T* y = parameters[1];
+    const T* z = parameters[2];
+
+    T sum_x = x[0];
+    T sum_y = y[0] + 2.0 * y[1];
+    T sum_z = z[0] + 3.0 * z[1] + 6.0 * z[2];
+
+    residuals[0] = sum_x;
+    residuals[1] = sum_y;
+    residuals[2] = sum_z;
+    residuals[3] = sum_x * sum_y;
+    residuals[4] = sum_y * sum_z;
+    residuals[5] = sum_x * sum_z;
+    residuals[6] = sum_x * sum_y * sum_z;
+    return true;
+  }
+};
+
+class ThreeParameterCostFunctorTest : public ::testing::Test {
+ protected:
+  virtual void SetUp() {
+    // Prepare the parameters.
+    x_.resize(1);
+    x_[0] = 0.0;
+
+    y_.resize(2);
+    y_[0] = 1.0;
+    y_[1] = 3.0;
+
+    z_.resize(3);
+    z_[0] = 2.0;
+    z_[1] = 4.0;
+    z_[2] = 6.0;
+
+    parameter_blocks_.resize(3);
+    parameter_blocks_[0] = &x_[0];
+    parameter_blocks_[1] = &y_[0];
+    parameter_blocks_[2] = &z_[0];
+
+    // Prepare the cost function.
+    typedef DynamicNumericDiffCostFunction<MyThreeParameterCostFunctor>
+      DynamicMyThreeParameterCostFunction;
+    DynamicMyThreeParameterCostFunction * cost_function =
+      new DynamicMyThreeParameterCostFunction(
+        new MyThreeParameterCostFunctor());
+    cost_function->AddParameterBlock(1);
+    cost_function->AddParameterBlock(2);
+    cost_function->AddParameterBlock(3);
+    cost_function->SetNumResiduals(7);
+
+    cost_function_.reset(cost_function);
+
+    // Setup jacobian data.
+    jacobian_vect_.resize(3);
+    jacobian_vect_[0].resize(7 * x_.size(), -100000);
+    jacobian_vect_[1].resize(7 * y_.size(), -100000);
+    jacobian_vect_[2].resize(7 * z_.size(), -100000);
+
+    // Prepare the expected residuals.
+    const double sum_x = x_[0];
+    const double sum_y = y_[0] + 2.0 * y_[1];
+    const double sum_z = z_[0] + 3.0 * z_[1] + 6.0 * z_[2];
+
+    expected_residuals_.resize(7);
+    expected_residuals_[0] = sum_x;
+    expected_residuals_[1] = sum_y;
+    expected_residuals_[2] = sum_z;
+    expected_residuals_[3] = sum_x * sum_y;
+    expected_residuals_[4] = sum_y * sum_z;
+    expected_residuals_[5] = sum_x * sum_z;
+    expected_residuals_[6] = sum_x * sum_y * sum_z;
+
+    // Prepare the expected jacobian entries.
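+    // Each expected Jacobian block is stored row major, i.e.
+    // expected_jacobian_y_[2 * r + c] is the derivative of residual r
+    // with respect to y[c]; x has one column and z has three.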
+    expected_jacobian_x_.resize(7);
+    expected_jacobian_x_[0] = 1.0;
+    expected_jacobian_x_[1] = 0.0;
+    expected_jacobian_x_[2] = 0.0;
+    expected_jacobian_x_[3] = sum_y;
+    expected_jacobian_x_[4] = 0.0;
+    expected_jacobian_x_[5] = sum_z;
+    expected_jacobian_x_[6] = sum_y * sum_z;
+
+    expected_jacobian_y_.resize(14);
+    expected_jacobian_y_[0] = 0.0;
+    expected_jacobian_y_[1] = 0.0;
+    expected_jacobian_y_[2] = 1.0;
+    expected_jacobian_y_[3] = 2.0;
+    expected_jacobian_y_[4] = 0.0;
+    expected_jacobian_y_[5] = 0.0;
+    expected_jacobian_y_[6] = sum_x;
+    expected_jacobian_y_[7] = 2.0 * sum_x;
+    expected_jacobian_y_[8] = sum_z;
+    expected_jacobian_y_[9] = 2.0 * sum_z;
+    expected_jacobian_y_[10] = 0.0;
+    expected_jacobian_y_[11] = 0.0;
+    expected_jacobian_y_[12] = sum_x * sum_z;
+    expected_jacobian_y_[13] = 2.0 * sum_x * sum_z;
+
+    expected_jacobian_z_.resize(21);
+    expected_jacobian_z_[0] = 0.0;
+    expected_jacobian_z_[1] = 0.0;
+    expected_jacobian_z_[2] = 0.0;
+    expected_jacobian_z_[3] = 0.0;
+    expected_jacobian_z_[4] = 0.0;
+    expected_jacobian_z_[5] = 0.0;
+    expected_jacobian_z_[6] = 1.0;
+    expected_jacobian_z_[7] = 3.0;
+    expected_jacobian_z_[8] = 6.0;
+    expected_jacobian_z_[9] = 0.0;
+    expected_jacobian_z_[10] = 0.0;
+    expected_jacobian_z_[11] = 0.0;
+    expected_jacobian_z_[12] = sum_y;
+    expected_jacobian_z_[13] = 3.0 * sum_y;
+    expected_jacobian_z_[14] = 6.0 * sum_y;
+    expected_jacobian_z_[15] = sum_x;
+    expected_jacobian_z_[16] = 3.0 * sum_x;
+    expected_jacobian_z_[17] = 6.0 * sum_x;
+    expected_jacobian_z_[18] = sum_x * sum_y;
+    expected_jacobian_z_[19] = 3.0 * sum_x * sum_y;
+    expected_jacobian_z_[20] = 6.0 * sum_x * sum_y;
+  }
+
+ protected:
+  vector<double> x_;
+  vector<double> y_;
+  vector<double> z_;
+
+  vector<double*> parameter_blocks_;
+
+  std::unique_ptr<CostFunction> cost_function_;
+
+  vector<vector<double>> jacobian_vect_;
+
+  vector<double> expected_residuals_;
+
+  vector<double> expected_jacobian_x_;
+  vector<double> expected_jacobian_y_;
+  vector<double> expected_jacobian_z_;
+};
+
+TEST_F(ThreeParameterCostFunctorTest, TestThreeParameterResiduals) {
+  vector<double> residuals(7, -100000);
+  EXPECT_TRUE(cost_function_->Evaluate(parameter_blocks_.data(),
+                                       residuals.data(),
+                                       NULL));
+  for (int i = 0; i < 7; ++i) {
+    EXPECT_EQ(expected_residuals_[i], residuals[i]);
+  }
+}
+
+TEST_F(ThreeParameterCostFunctorTest, TestThreeParameterJacobian) {
+  vector<double> residuals(7, -100000);
+
+  vector<double*> jacobian;
+  jacobian.push_back(jacobian_vect_[0].data());
+  jacobian.push_back(jacobian_vect_[1].data());
+  jacobian.push_back(jacobian_vect_[2].data());
+
+  EXPECT_TRUE(cost_function_->Evaluate(parameter_blocks_.data(),
+                                       residuals.data(),
+                                       jacobian.data()));
+
+  for (int i = 0; i < 7; ++i) {
+    EXPECT_EQ(expected_residuals_[i], residuals[i]);
+  }
+
+  for (int i = 0; i < 7; ++i) {
+    EXPECT_NEAR(expected_jacobian_x_[i], jacobian[0][i], kTolerance);
+  }
+
+  for (int i = 0; i < 14; ++i) {
+    EXPECT_NEAR(expected_jacobian_y_[i], jacobian[1][i], kTolerance);
+  }
+
+  for (int i = 0; i < 21; ++i) {
+    EXPECT_NEAR(expected_jacobian_z_[i], jacobian[2][i], kTolerance);
+  }
+}
+
+TEST_F(ThreeParameterCostFunctorTest,
+       ThreeParameterJacobianWithFirstAndLastParameterBlockConstant) {
+  vector<double> residuals(7, -100000);
+
+  vector<double*> jacobian;
+  jacobian.push_back(NULL);
+  jacobian.push_back(jacobian_vect_[1].data());
+  jacobian.push_back(NULL);
+
+  EXPECT_TRUE(cost_function_->Evaluate(parameter_blocks_.data(),
+                                       residuals.data(),
+                                       jacobian.data()));
+
+  for (int i = 0; i < 7; ++i) {
+    EXPECT_EQ(expected_residuals_[i], residuals[i]);
+  }
+
+  for (int i = 0; i < 14; ++i) {
+    EXPECT_NEAR(expected_jacobian_y_[i], jacobian[1][i], kTolerance);
+  }
+}
+
+TEST_F(ThreeParameterCostFunctorTest,
+       ThreeParameterJacobianWithSecondParameterBlockConstant) {
+  vector<double> residuals(7, -100000);
+
+  vector<double*> jacobian;
+  jacobian.push_back(jacobian_vect_[0].data());
+  jacobian.push_back(NULL);
+  jacobian.push_back(jacobian_vect_[2].data());
+
+  EXPECT_TRUE(cost_function_->Evaluate(parameter_blocks_.data(),
+                                       residuals.data(),
+                                       jacobian.data()));
+
+  for (int i = 0; i < 7; ++i) {
+    EXPECT_EQ(expected_residuals_[i], residuals[i]);
+  }
+
+  for (int i = 0; i < 7; ++i) {
+    EXPECT_NEAR(expected_jacobian_x_[i], jacobian[0][i], kTolerance);
+  }
+
+  for (int i = 0; i < 21; ++i) {
+    EXPECT_NEAR(expected_jacobian_z_[i], jacobian[2][i], kTolerance);
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/dynamic_sparse_normal_cholesky_solver.cc b/internal/ceres/dynamic_sparse_normal_cholesky_solver.cc
new file mode 100644
index 0000000..f966083
--- /dev/null
+++ b/internal/ceres/dynamic_sparse_normal_cholesky_solver.cc
@@ -0,0 +1,284 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/dynamic_sparse_normal_cholesky_solver.h"
+
+#include <algorithm>
+#include <cstring>
+#include <ctime>
+#include <memory>
+#include <sstream>
+
+#include "Eigen/SparseCore"
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/cxsparse.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/linear_solver.h"
+#include "ceres/suitesparse.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/types.h"
+#include "ceres/wall_time.h"
+
+#ifdef CERES_USE_EIGEN_SPARSE
+#include "Eigen/SparseCholesky"
+#endif
+
+namespace ceres {
+namespace internal {
+
+DynamicSparseNormalCholeskySolver::DynamicSparseNormalCholeskySolver(
+    const LinearSolver::Options& options)
+    : options_(options) {}
+
+LinearSolver::Summary DynamicSparseNormalCholeskySolver::SolveImpl(
+    CompressedRowSparseMatrix* A,
+    const double* b,
+    const LinearSolver::PerSolveOptions& per_solve_options,
+    double* x) {
+  const int num_cols = A->num_cols();
+  VectorRef(x, num_cols).setZero();
+  A->LeftMultiply(b, x);
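+  // x now holds the right hand side of the normal equations, A' b; the
+  // backend specific solvers below overwrite it in place with the
+  // solution of (A' A) x = A' b.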
+
+  if (per_solve_options.D != nullptr) {
+    // Temporarily append a diagonal block to the A matrix, but undo
+    // it before returning the matrix to the user.
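+    // Appending diag(D) as extra rows is equivalent to adding D'D to
+    // the normal equations, i.e. the system solved below becomes
+    // (A'A + D'D) x = A'b.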
+    std::unique_ptr<CompressedRowSparseMatrix> regularizer;
+    if (!A->col_blocks().empty()) {
+      regularizer.reset(CompressedRowSparseMatrix::CreateBlockDiagonalMatrix(
+          per_solve_options.D, A->col_blocks()));
+    } else {
+      regularizer.reset(
+          new CompressedRowSparseMatrix(per_solve_options.D, num_cols));
+    }
+    A->AppendRows(*regularizer);
+  }
+
+  LinearSolver::Summary summary;
+  switch (options_.sparse_linear_algebra_library_type) {
+    case SUITE_SPARSE:
+      summary = SolveImplUsingSuiteSparse(A, x);
+      break;
+    case CX_SPARSE:
+      summary = SolveImplUsingCXSparse(A, x);
+      break;
+    case EIGEN_SPARSE:
+      summary = SolveImplUsingEigen(A, x);
+      break;
+    default:
+      LOG(FATAL) << "Unknown sparse linear algebra library : "
+                 << options_.sparse_linear_algebra_library_type;
+  }
+
+  if (per_solve_options.D != nullptr) {
+    A->DeleteRows(num_cols);
+  }
+
+  return summary;
+}
+
+LinearSolver::Summary DynamicSparseNormalCholeskySolver::SolveImplUsingEigen(
+    CompressedRowSparseMatrix* A, double* rhs_and_solution) {
+#ifndef CERES_USE_EIGEN_SPARSE
+
+  LinearSolver::Summary summary;
+  summary.num_iterations = 0;
+  summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+  summary.message =
+      "SPARSE_NORMAL_CHOLESKY cannot be used with EIGEN_SPARSE "
+      "because Ceres was not built with support for "
+      "Eigen's SimplicialLDLT decomposition. "
+      "This requires enabling building with -DEIGENSPARSE=ON.";
+  return summary;
+
+#else
+
+  EventLogger event_logger("DynamicSparseNormalCholeskySolver::Eigen::Solve");
+
+  Eigen::MappedSparseMatrix<double, Eigen::RowMajor> a(A->num_rows(),
+                                                       A->num_cols(),
+                                                       A->num_nonzeros(),
+                                                       A->mutable_rows(),
+                                                       A->mutable_cols(),
+                                                       A->mutable_values());
+
+  Eigen::SparseMatrix<double> lhs = a.transpose() * a;
+  Eigen::SimplicialLDLT<Eigen::SparseMatrix<double>> solver;
+
+  LinearSolver::Summary summary;
+  summary.num_iterations = 1;
+  summary.termination_type = LINEAR_SOLVER_SUCCESS;
+  summary.message = "Success.";
+
+  solver.analyzePattern(lhs);
+  if (VLOG_IS_ON(2)) {
+    std::stringstream ss;
+    solver.dumpMemory(ss);
+    VLOG(2) << "Symbolic Analysis\n" << ss.str();
+  }
+
+  event_logger.AddEvent("Analyze");
+  if (solver.info() != Eigen::Success) {
+    summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+    summary.message = "Eigen failure. Unable to find symbolic factorization.";
+    return summary;
+  }
+
+  solver.factorize(lhs);
+  event_logger.AddEvent("Factorize");
+  if (solver.info() != Eigen::Success) {
+    summary.termination_type = LINEAR_SOLVER_FAILURE;
+    summary.message = "Eigen failure. Unable to find numeric factorization.";
+    return summary;
+  }
+
+  const Vector rhs = VectorRef(rhs_and_solution, lhs.cols());
+  VectorRef(rhs_and_solution, lhs.cols()) = solver.solve(rhs);
+  event_logger.AddEvent("Solve");
+  if (solver.info() != Eigen::Success) {
+    summary.termination_type = LINEAR_SOLVER_FAILURE;
+    summary.message = "Eigen failure. Unable to do triangular solve.";
+    return summary;
+  }
+
+  return summary;
+#endif  // CERES_USE_EIGEN_SPARSE
+}
+
+LinearSolver::Summary DynamicSparseNormalCholeskySolver::SolveImplUsingCXSparse(
+    CompressedRowSparseMatrix* A, double* rhs_and_solution) {
+#ifdef CERES_NO_CXSPARSE
+
+  LinearSolver::Summary summary;
+  summary.num_iterations = 0;
+  summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+  summary.message =
+      "SPARSE_NORMAL_CHOLESKY cannot be used with CX_SPARSE "
+      "because Ceres was not built with support for CXSparse. "
+      "This requires enabling building with -DCXSPARSE=ON.";
+
+  return summary;
+
+#else
+  EventLogger event_logger(
+      "DynamicSparseNormalCholeskySolver::CXSparse::Solve");
+
+  LinearSolver::Summary summary;
+  summary.num_iterations = 1;
+  summary.termination_type = LINEAR_SOLVER_SUCCESS;
+  summary.message = "Success.";
+
+  CXSparse cxsparse;
+
+  // Reinterpret the (possibly D-augmented) Jacobian's row major
+  // storage as a compressed sparse column matrix, which gives a view
+  // of its transpose Jt without copying.
+  cs_di a_transpose = cxsparse.CreateSparseMatrixTransposeView(A);
+
+  // Compute the normal equations, J'J delta = J'f, and solve them
+  // using a sparse Cholesky factorization. Notice that when compared
+  // to SuiteSparse we have to explicitly compute the transpose of Jt
+  // and then the normal equations before they can be factorized.
+  // CHOLMOD/SuiteSparse on the other hand can work directly off of Jt
+  // to compute the Cholesky factorization of the normal equations.
+  cs_di* a = cxsparse.TransposeMatrix(&a_transpose);
+  cs_di* lhs = cxsparse.MatrixMatrixMultiply(&a_transpose, a);
+  cxsparse.Free(a);
+  event_logger.AddEvent("NormalEquations");
+
+  if (!cxsparse.SolveCholesky(lhs, rhs_and_solution)) {
+    summary.termination_type = LINEAR_SOLVER_FAILURE;
+    summary.message = "CXSparse::SolveCholesky failed";
+  }
+  event_logger.AddEvent("Solve");
+
+  cxsparse.Free(lhs);
+  event_logger.AddEvent("TearDown");
+  return summary;
+#endif
+}
+
+LinearSolver::Summary
+DynamicSparseNormalCholeskySolver::SolveImplUsingSuiteSparse(
+    CompressedRowSparseMatrix* A, double* rhs_and_solution) {
+#ifdef CERES_NO_SUITESPARSE
+
+  LinearSolver::Summary summary;
+  summary.num_iterations = 0;
+  summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+  summary.message =
+      "SPARSE_NORMAL_CHOLESKY cannot be used with SUITE_SPARSE "
+      "because Ceres was not built with support for SuiteSparse. "
+      "This requires enabling building with -DSUITESPARSE=ON.";
+  return summary;
+
+#else
+
+  EventLogger event_logger(
+      "DynamicSparseNormalCholeskySolver::SuiteSparse::Solve");
+  LinearSolver::Summary summary;
+  summary.termination_type = LINEAR_SOLVER_SUCCESS;
+  summary.num_iterations = 1;
+  summary.message = "Success.";
+
+  SuiteSparse ss;
+  const int num_cols = A->num_cols();
+  cholmod_sparse lhs = ss.CreateSparseMatrixTransposeView(A);
+  event_logger.AddEvent("Setup");
+  cholmod_factor* factor = ss.AnalyzeCholesky(&lhs, &summary.message);
+  event_logger.AddEvent("Analysis");
+
+  if (factor == nullptr) {
+    summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+    return summary;
+  }
+
+  summary.termination_type = ss.Cholesky(&lhs, factor, &summary.message);
+  if (summary.termination_type == LINEAR_SOLVER_SUCCESS) {
+    cholmod_dense cholmod_rhs =
+        ss.CreateDenseVectorView(rhs_and_solution, num_cols);
+    cholmod_dense* solution = ss.Solve(factor, &cholmod_rhs, &summary.message);
+    event_logger.AddEvent("Solve");
+    if (solution != nullptr) {
+      memcpy(
+          rhs_and_solution, solution->x, num_cols * sizeof(*rhs_and_solution));
+      ss.Free(solution);
+    } else {
+      summary.termination_type = LINEAR_SOLVER_FAILURE;
+    }
+  }
+
+  ss.Free(factor);
+  event_logger.AddEvent("Teardown");
+  return summary;
+
+#endif
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/dynamic_sparse_normal_cholesky_solver.h b/internal/ceres/dynamic_sparse_normal_cholesky_solver.h
new file mode 100644
index 0000000..17be90c
--- /dev/null
+++ b/internal/ceres/dynamic_sparse_normal_cholesky_solver.h
@@ -0,0 +1,86 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// A solver for sparse linear least squares problems based on solving
+// the normal equations via a sparse Cholesky factorization.
+
+#ifndef CERES_INTERNAL_DYNAMIC_SPARSE_NORMAL_CHOLESKY_SOLVER_H_
+#define CERES_INTERNAL_DYNAMIC_SPARSE_NORMAL_CHOLESKY_SOLVER_H_
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#include "ceres/linear_solver.h"
+
+namespace ceres {
+namespace internal {
+
+class CompressedRowSparseMatrix;
+
+// A variant of SparseNormalCholeskySolver in the case where matrix
+// sparsity is not constant across calls to Solve. This means that
+// there is no benefit to symbolically factorizing the matrix and
+// caching this factorization.
+//
+// TODO(alex): Add support for Accelerate sparse solvers:
+// https://github.com/ceres-solver/ceres-solver/issues/397
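+//
+// Usage sketch (illustrative only, mirroring the pattern used in
+// dynamic_sparse_normal_cholesky_solver_test.cc; `A`, `b` and `x` are
+// hypothetical, suitably sized inputs):
+//
+//   LinearSolver::Options options;
+//   options.type = SPARSE_NORMAL_CHOLESKY;
+//   options.dynamic_sparsity = true;
+//   options.sparse_linear_algebra_library_type = EIGEN_SPARSE;
+//   ContextImpl context;
+//   options.context = &context;
+//   std::unique_ptr<LinearSolver> solver(LinearSolver::Create(options));
+//   LinearSolver::PerSolveOptions per_solve_options;
+//   solver->Solve(A, b, per_solve_options, x);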
+class DynamicSparseNormalCholeskySolver
+    : public CompressedRowSparseMatrixSolver {
+ public:
+  explicit DynamicSparseNormalCholeskySolver(
+      const LinearSolver::Options& options);
+  virtual ~DynamicSparseNormalCholeskySolver() {}
+
+ private:
+  virtual LinearSolver::Summary SolveImpl(
+      CompressedRowSparseMatrix* A,
+      const double* b,
+      const LinearSolver::PerSolveOptions& options,
+      double* x);
+
+  LinearSolver::Summary SolveImplUsingSuiteSparse(
+      CompressedRowSparseMatrix* A,
+      double* rhs_and_solution);
+
+  LinearSolver::Summary SolveImplUsingCXSparse(
+      CompressedRowSparseMatrix* A,
+      double* rhs_and_solution);
+
+  LinearSolver::Summary SolveImplUsingEigen(
+      CompressedRowSparseMatrix* A,
+      double* rhs_and_solution);
+
+  const LinearSolver::Options options_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_DYNAMIC_SPARSE_NORMAL_CHOLESKY_SOLVER_H_
diff --git a/internal/ceres/dynamic_sparse_normal_cholesky_solver_test.cc b/internal/ceres/dynamic_sparse_normal_cholesky_solver_test.cc
new file mode 100644
index 0000000..4fe06f8
--- /dev/null
+++ b/internal/ceres/dynamic_sparse_normal_cholesky_solver_test.cc
@@ -0,0 +1,131 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <memory>
+#include "ceres/casts.h"
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/context_impl.h"
+#include "ceres/linear_least_squares_problems.h"
+#include "ceres/linear_solver.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+#include "Eigen/Cholesky"
+
+namespace ceres {
+namespace internal {
+
+// TODO(sameeragarwal): These tests need to be re-written to be more
+// thorough; they do not really test the dynamic nature of the
+// sparsity.
+class DynamicSparseNormalCholeskySolverTest : public ::testing::Test {
+ protected:
+  virtual void SetUp() {
+    std::unique_ptr<LinearLeastSquaresProblem> problem(
+        CreateLinearLeastSquaresProblemFromId(1));
+    A_.reset(CompressedRowSparseMatrix::FromTripletSparseMatrix(
+        *down_cast<TripletSparseMatrix*>(problem->A.get())));
+    b_.reset(problem->b.release());
+    D_.reset(problem->D.release());
+  }
+
+  void TestSolver(const LinearSolver::Options& options, double* D) {
+    Matrix dense_A;
+    A_->ToDenseMatrix(&dense_A);
+    Matrix lhs = dense_A.transpose() * dense_A;
+    if (D != NULL) {
+      lhs += (ConstVectorRef(D, A_->num_cols()).array() *
+              ConstVectorRef(D, A_->num_cols()).array())
+                 .matrix()
+                 .asDiagonal();
+    }
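+    // lhs is now the (optionally regularized) normal equations matrix,
+    // A'A + D'D, which is solved densely below to obtain the reference
+    // solution.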
+
+    Vector rhs(A_->num_cols());
+    rhs.setZero();
+    A_->LeftMultiply(b_.get(), rhs.data());
+    Vector expected_solution = lhs.llt().solve(rhs);
+
+    std::unique_ptr<LinearSolver> solver(LinearSolver::Create(options));
+    LinearSolver::PerSolveOptions per_solve_options;
+    per_solve_options.D = D;
+    Vector actual_solution(A_->num_cols());
+    LinearSolver::Summary summary;
+    summary = solver->Solve(
+        A_.get(), b_.get(), per_solve_options, actual_solution.data());
+
+    EXPECT_EQ(summary.termination_type, LINEAR_SOLVER_SUCCESS);
+
+    for (int i = 0; i < A_->num_cols(); ++i) {
+      EXPECT_NEAR(expected_solution(i), actual_solution(i), 1e-8)
+          << "\nExpected: " << expected_solution.transpose()
+          << "\nActual: " << actual_solution.transpose();
+    }
+  }
+
+  void TestSolver(
+      const SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type) {
+    LinearSolver::Options options;
+    options.type = SPARSE_NORMAL_CHOLESKY;
+    options.dynamic_sparsity = true;
+    options.sparse_linear_algebra_library_type =
+        sparse_linear_algebra_library_type;
+    ContextImpl context;
+    options.context = &context;
+    TestSolver(options, NULL);
+    TestSolver(options, D_.get());
+  }
+
+  std::unique_ptr<CompressedRowSparseMatrix> A_;
+  std::unique_ptr<double[]> b_;
+  std::unique_ptr<double[]> D_;
+};
+
+#ifndef CERES_NO_SUITESPARSE
+TEST_F(DynamicSparseNormalCholeskySolverTest, SuiteSparse) {
+  TestSolver(SUITE_SPARSE);
+}
+#endif
+
+#ifndef CERES_NO_CXSPARSE
+TEST_F(DynamicSparseNormalCholeskySolverTest, CXSparse) {
+  TestSolver(CX_SPARSE);
+}
+#endif
+
+#ifdef CERES_USE_EIGEN_SPARSE
+TEST_F(DynamicSparseNormalCholeskySolverTest, Eigen) {
+  TestSolver(EIGEN_SPARSE);
+}
+#endif  // CERES_USE_EIGEN_SPARSE
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/dynamic_sparsity_test.cc b/internal/ceres/dynamic_sparsity_test.cc
new file mode 100644
index 0000000..5fe60f4
--- /dev/null
+++ b/internal/ceres/dynamic_sparsity_test.cc
@@ -0,0 +1,449 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: richie.stebbing@gmail.com (Richard Stebbing)
+//         sameeragarwal@google.com (Sameer Agarwal)
+//
+// Based on examples/ellipse_approximation.cc
+
+#include <cmath>
+#include <limits>
+#include <vector>
+#include "ceres/ceres.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+// Data generated with the following Python code.
+//   import numpy as np
+//   np.random.seed(1337)
+//   t = np.linspace(0.0, 2.0 * np.pi, 212, endpoint=False)
+//   t += 2.0 * np.pi * 0.01 * np.random.randn(t.size)
+//   theta = np.deg2rad(15)
+//   a, b = np.cos(theta), np.sin(theta)
+//   R = np.array([[a, -b],
+//                 [b, a]])
+//   Y = np.dot(np.c_[4.0 * np.cos(t), np.sin(t)], R.T)
+
+const int kYRows = 212;
+const int kYCols = 2;
+const double kYData[kYRows * kYCols] = {
+  +3.871364e+00, +9.916027e-01,
+  +3.864003e+00, +1.034148e+00,
+  +3.850651e+00, +1.072202e+00,
+  +3.868350e+00, +1.014408e+00,
+  +3.796381e+00, +1.153021e+00,
+  +3.857138e+00, +1.056102e+00,
+  +3.787532e+00, +1.162215e+00,
+  +3.704477e+00, +1.227272e+00,
+  +3.564711e+00, +1.294959e+00,
+  +3.754363e+00, +1.191948e+00,
+  +3.482098e+00, +1.322725e+00,
+  +3.602777e+00, +1.279658e+00,
+  +3.585433e+00, +1.286858e+00,
+  +3.347505e+00, +1.356415e+00,
+  +3.220855e+00, +1.378914e+00,
+  +3.558808e+00, +1.297174e+00,
+  +3.403618e+00, +1.343809e+00,
+  +3.179828e+00, +1.384721e+00,
+  +3.054789e+00, +1.398759e+00,
+  +3.294153e+00, +1.366808e+00,
+  +3.247312e+00, +1.374813e+00,
+  +2.988547e+00, +1.404247e+00,
+  +3.114508e+00, +1.392698e+00,
+  +2.899226e+00, +1.409802e+00,
+  +2.533256e+00, +1.414778e+00,
+  +2.654773e+00, +1.415909e+00,
+  +2.565100e+00, +1.415313e+00,
+  +2.976456e+00, +1.405118e+00,
+  +2.484200e+00, +1.413640e+00,
+  +2.324751e+00, +1.407476e+00,
+  +1.930468e+00, +1.378221e+00,
+  +2.329017e+00, +1.407688e+00,
+  +1.760640e+00, +1.360319e+00,
+  +2.147375e+00, +1.396603e+00,
+  +1.741989e+00, +1.358178e+00,
+  +1.743859e+00, +1.358394e+00,
+  +1.557372e+00, +1.335208e+00,
+  +1.280551e+00, +1.295087e+00,
+  +1.429880e+00, +1.317546e+00,
+  +1.213485e+00, +1.284400e+00,
+  +9.168172e-01, +1.232870e+00,
+  +1.311141e+00, +1.299839e+00,
+  +1.231969e+00, +1.287382e+00,
+  +7.453773e-01, +1.200049e+00,
+  +6.151587e-01, +1.173683e+00,
+  +5.935666e-01, +1.169193e+00,
+  +2.538707e-01, +1.094227e+00,
+  +6.806136e-01, +1.187089e+00,
+  +2.805447e-01, +1.100405e+00,
+  +6.184807e-01, +1.174371e+00,
+  +1.170550e-01, +1.061762e+00,
+  +2.890507e-01, +1.102365e+00,
+  +3.834234e-01, +1.123772e+00,
+  +3.980161e-04, +1.033061e+00,
+  -3.651680e-01, +9.370367e-01,
+  -8.386351e-01, +7.987201e-01,
+  -8.105704e-01, +8.073702e-01,
+  -8.735139e-01, +7.878886e-01,
+  -9.913836e-01, +7.506100e-01,
+  -8.784011e-01, +7.863636e-01,
+  -1.181440e+00, +6.882566e-01,
+  -1.229556e+00, +6.720191e-01,
+  -1.035839e+00, +7.362765e-01,
+  -8.031520e-01, +8.096470e-01,
+  -1.539136e+00, +5.629549e-01,
+  -1.755423e+00, +4.817306e-01,
+  -1.337589e+00, +6.348763e-01,
+  -1.836966e+00, +4.499485e-01,
+  -1.913367e+00, +4.195617e-01,
+  -2.126467e+00, +3.314900e-01,
+  -1.927625e+00, +4.138238e-01,
+  -2.339862e+00, +2.379074e-01,
+  -1.881736e+00, +4.322152e-01,
+  -2.116753e+00, +3.356163e-01,
+  -2.255733e+00, +2.754930e-01,
+  -2.555834e+00, +1.368473e-01,
+  -2.770277e+00, +2.895711e-02,
+  -2.563376e+00, +1.331890e-01,
+  -2.826715e+00, -9.000818e-04,
+  -2.978191e+00, -8.457804e-02,
+  -3.115855e+00, -1.658786e-01,
+  -2.982049e+00, -8.678322e-02,
+  -3.307892e+00, -2.902083e-01,
+  -3.038346e+00, -1.194222e-01,
+  -3.190057e+00, -2.122060e-01,
+  -3.279086e+00, -2.705777e-01,
+  -3.322028e+00, -2.999889e-01,
+  -3.122576e+00, -1.699965e-01,
+  -3.551973e+00, -4.768674e-01,
+  -3.581866e+00, -5.032175e-01,
+  -3.497799e+00, -4.315203e-01,
+  -3.565384e+00, -4.885602e-01,
+  -3.699493e+00, -6.199815e-01,
+  -3.585166e+00, -5.061925e-01,
+  -3.758914e+00, -6.918275e-01,
+  -3.741104e+00, -6.689131e-01,
+  -3.688331e+00, -6.077239e-01,
+  -3.810425e+00, -7.689015e-01,
+  -3.791829e+00, -7.386911e-01,
+  -3.789951e+00, -7.358189e-01,
+  -3.823100e+00, -7.918398e-01,
+  -3.857021e+00, -8.727074e-01,
+  -3.858250e+00, -8.767645e-01,
+  -3.872100e+00, -9.563174e-01,
+  -3.864397e+00, -1.032630e+00,
+  -3.846230e+00, -1.081669e+00,
+  -3.834799e+00, -1.102536e+00,
+  -3.866684e+00, -1.022901e+00,
+  -3.808643e+00, -1.139084e+00,
+  -3.868840e+00, -1.011569e+00,
+  -3.791071e+00, -1.158615e+00,
+  -3.797999e+00, -1.151267e+00,
+  -3.696278e+00, -1.232314e+00,
+  -3.779007e+00, -1.170504e+00,
+  -3.622855e+00, -1.270793e+00,
+  -3.647249e+00, -1.259166e+00,
+  -3.655412e+00, -1.255042e+00,
+  -3.573218e+00, -1.291696e+00,
+  -3.638019e+00, -1.263684e+00,
+  -3.498409e+00, -1.317750e+00,
+  -3.304143e+00, -1.364970e+00,
+  -3.183001e+00, -1.384295e+00,
+  -3.202456e+00, -1.381599e+00,
+  -3.244063e+00, -1.375332e+00,
+  -3.233308e+00, -1.377019e+00,
+  -3.060112e+00, -1.398264e+00,
+  -3.078187e+00, -1.396517e+00,
+  -2.689594e+00, -1.415761e+00,
+  -2.947662e+00, -1.407039e+00,
+  -2.854490e+00, -1.411860e+00,
+  -2.660499e+00, -1.415900e+00,
+  -2.875955e+00, -1.410930e+00,
+  -2.675385e+00, -1.415848e+00,
+  -2.813155e+00, -1.413363e+00,
+  -2.417673e+00, -1.411512e+00,
+  -2.725461e+00, -1.415373e+00,
+  -2.148334e+00, -1.396672e+00,
+  -2.108972e+00, -1.393738e+00,
+  -2.029905e+00, -1.387302e+00,
+  -2.046214e+00, -1.388687e+00,
+  -2.057402e+00, -1.389621e+00,
+  -1.650250e+00, -1.347160e+00,
+  -1.806764e+00, -1.365469e+00,
+  -1.206973e+00, -1.283343e+00,
+  -8.029259e-01, -1.211308e+00,
+  -1.229551e+00, -1.286993e+00,
+  -1.101507e+00, -1.265754e+00,
+  -9.110645e-01, -1.231804e+00,
+  -1.110046e+00, -1.267211e+00,
+  -8.465274e-01, -1.219677e+00,
+  -7.594163e-01, -1.202818e+00,
+  -8.023823e-01, -1.211203e+00,
+  -3.732519e-01, -1.121494e+00,
+  -1.918373e-01, -1.079668e+00,
+  -4.671988e-01, -1.142253e+00,
+  -4.033645e-01, -1.128215e+00,
+  -1.920740e-01, -1.079724e+00,
+  -3.022157e-01, -1.105389e+00,
+  -1.652831e-01, -1.073354e+00,
+  +4.671625e-01, -9.085886e-01,
+  +5.940178e-01, -8.721832e-01,
+  +3.147557e-01, -9.508290e-01,
+  +6.383631e-01, -8.591867e-01,
+  +9.888923e-01, -7.514088e-01,
+  +7.076339e-01, -8.386023e-01,
+  +1.326682e+00, -6.386698e-01,
+  +1.149834e+00, -6.988221e-01,
+  +1.257742e+00, -6.624207e-01,
+  +1.492352e+00, -5.799632e-01,
+  +1.595574e+00, -5.421766e-01,
+  +1.240173e+00, -6.684113e-01,
+  +1.706612e+00, -5.004442e-01,
+  +1.873984e+00, -4.353002e-01,
+  +1.985633e+00, -3.902561e-01,
+  +1.722880e+00, -4.942329e-01,
+  +2.095182e+00, -3.447402e-01,
+  +2.018118e+00, -3.768991e-01,
+  +2.422702e+00, -1.999563e-01,
+  +2.370611e+00, -2.239326e-01,
+  +2.152154e+00, -3.205250e-01,
+  +2.525121e+00, -1.516499e-01,
+  +2.422116e+00, -2.002280e-01,
+  +2.842806e+00, +9.536372e-03,
+  +3.030128e+00, +1.146027e-01,
+  +2.888424e+00, +3.433444e-02,
+  +2.991609e+00, +9.226409e-02,
+  +2.924807e+00, +5.445844e-02,
+  +3.007772e+00, +1.015875e-01,
+  +2.781973e+00, -2.282382e-02,
+  +3.164737e+00, +1.961781e-01,
+  +3.237671e+00, +2.430139e-01,
+  +3.046123e+00, +1.240014e-01,
+  +3.414834e+00, +3.669060e-01,
+  +3.436591e+00, +3.833600e-01,
+  +3.626207e+00, +5.444311e-01,
+  +3.223325e+00, +2.336361e-01,
+  +3.511963e+00, +4.431060e-01,
+  +3.698380e+00, +6.187442e-01,
+  +3.670244e+00, +5.884943e-01,
+  +3.558833e+00, +4.828230e-01,
+  +3.661807e+00, +5.797689e-01,
+  +3.767261e+00, +7.030893e-01,
+  +3.801065e+00, +7.532650e-01,
+  +3.828523e+00, +8.024454e-01,
+  +3.840719e+00, +8.287032e-01,
+  +3.848748e+00, +8.485921e-01,
+  +3.865801e+00, +9.066551e-01,
+  +3.870983e+00, +9.404873e-01,
+  +3.870263e+00, +1.001884e+00,
+  +3.864462e+00, +1.032374e+00,
+  +3.870542e+00, +9.996121e-01,
+  +3.865424e+00, +1.028474e+00
+};
+
+ConstMatrixRef kY(kYData, kYRows, kYCols);
+
+class PointToLineSegmentContourCostFunction : public CostFunction {
+ public:
+  PointToLineSegmentContourCostFunction(const int num_segments,
+                                        const Eigen::Vector2d& y)
+      : num_segments_(num_segments), y_(y) {
+    // The first parameter is the preimage position.
+    mutable_parameter_block_sizes()->push_back(1);
+    // The next parameters are the control points for the line segment contour.
+    for (int i = 0; i < num_segments_; ++i) {
+      mutable_parameter_block_sizes()->push_back(2);
+    }
+    set_num_residuals(2);
+  }
+
+  virtual bool Evaluate(const double* const* x,
+                        double* residuals,
+                        double** jacobians) const {
+    // Convert the preimage position `t` into a segment index `i0` and the
+    // line segment interpolation parameter `u`. `i1` is the index of the next
+    // control point.
+    const double t = ModuloNumSegments(*x[0]);
+    CHECK_GE(t, 0.0);
+    CHECK_LT(t, num_segments_);
+    const int i0 = floor(t), i1 = (i0 + 1) % num_segments_;
+    const double u = t - i0;
+
+    // Linearly interpolate between control points `i0` and `i1`.
+    residuals[0] = y_[0] - ((1.0 - u) * x[1 + i0][0] + u * x[1 + i1][0]);
+    residuals[1] = y_[1] - ((1.0 - u) * x[1 + i0][1] + u * x[1 + i1][1]);
+
+    if (jacobians == NULL) {
+      return true;
+    }
+
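+    // With r = y - ((1 - u) * x[1 + i0] + u * x[1 + i1]) and du/dt = 1
+    // within a segment, the nonzero Jacobian blocks are
+    //   dr/dt     = x[1 + i0] - x[1 + i1],
+    //   dr/dx[i0] = -(1 - u) * I,
+    //   dr/dx[i1] = -u * I;
+    // all other control points have zero derivative.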
+    if (jacobians[0] != NULL) {
+      jacobians[0][0] = x[1 + i0][0] - x[1 + i1][0];
+      jacobians[0][1] = x[1 + i0][1] - x[1 + i1][1];
+    }
+    for (int i = 0; i < num_segments_; ++i) {
+      if (jacobians[i + 1] != NULL) {
+        MatrixRef(jacobians[i + 1], 2, 2).setZero();
+        if (i == i0) {
+          jacobians[i + 1][0] = -(1.0 - u);
+          jacobians[i + 1][3] = -(1.0 - u);
+        } else if (i == i1) {
+          jacobians[i + 1][0] = -u;
+          jacobians[i + 1][3] = -u;
+        }
+      }
+    }
+    return true;
+  }
+
+  static CostFunction* Create(const int num_segments, const Eigen::Vector2d& y) {
+    return new PointToLineSegmentContourCostFunction(num_segments, y);
+  }
+
+ private:
+  inline double ModuloNumSegments(const double t) const {
+    return t - num_segments_ * floor(t / num_segments_);
+  }
+
+  const int num_segments_;
+  const Eigen::Vector2d y_;
+};
+
+class EuclideanDistanceFunctor {
+ public:
+  explicit EuclideanDistanceFunctor(const double sqrt_weight)
+      : sqrt_weight_(sqrt_weight) {}
+
+  template <typename T>
+  bool operator()(const T* x0, const T* x1, T* residuals) const {
+    residuals[0] = sqrt_weight_ * (x0[0] - x1[0]);
+    residuals[1] = sqrt_weight_ * (x0[1] - x1[1]);
+    return true;
+  }
+
+  static CostFunction* Create(const double sqrt_weight) {
+    return new AutoDiffCostFunction<EuclideanDistanceFunctor, 2, 2, 2>(
+        new EuclideanDistanceFunctor(sqrt_weight));
+  }
+
+ private:
+  const double sqrt_weight_;
+};
+
+TEST(DynamicSparsity, StaticAndDynamicSparsityProduceSameSolution) {
+  // Skip test if there is no sparse linear algebra library.
+  if (!IsSparseLinearAlgebraLibraryTypeAvailable(SUITE_SPARSE) &&
+      !IsSparseLinearAlgebraLibraryTypeAvailable(CX_SPARSE) &&
+      !IsSparseLinearAlgebraLibraryTypeAvailable(EIGEN_SPARSE)) {
+    return;
+  }
+
+  // Problem configuration.
+  const int num_segments = 151;
+  const double regularization_weight = 1e-2;
+
+  // `X` is the matrix of control points which make up the contour of line
+  // segments. The number of control points is equal to the number of line
+  // segments because the contour is closed.
+  //
+  // Initialize `X` to points on the unit circle.
+  Vector w(num_segments + 1);
+  w.setLinSpaced(num_segments + 1, 0.0, 2.0 * M_PI);
+  w.conservativeResize(num_segments);
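+  // w now holds num_segments angles evenly spaced in [0, 2 * pi); the
+  // extra endpoint was dropped so that the first and last control
+  // points of the closed contour do not coincide.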
+  Matrix X(num_segments, 2);
+  X.col(0) = w.array().cos();
+  X.col(1) = w.array().sin();
+
+  // Each data point has an associated preimage position on the line segment
+  // contour. Initialize each preimage position to the index of the control
+  // point closest to its data point.
+  const int num_observations = kY.rows();
+  Vector t(num_observations);
+  for (int i = 0; i < num_observations; ++i) {
+    (X.rowwise() - kY.row(i)).rowwise().squaredNorm().minCoeff(&t[i]);
+  }
+
+  Problem problem;
+
+  // For each data point add a residual which measures its distance to its
+  // corresponding position on the line segment contour.
+  std::vector<double*> parameter_blocks(1 + num_segments);
+  parameter_blocks[0] = NULL;
+  for (int i = 0; i < num_segments; ++i) {
+    parameter_blocks[i + 1] = X.data() + 2 * i;
+  }
+  for (int i = 0; i < num_observations; ++i) {
+    parameter_blocks[0] = &t[i];
+    problem.AddResidualBlock(
+        PointToLineSegmentContourCostFunction::Create(num_segments, kY.row(i)),
+        NULL,
+        parameter_blocks);
+  }
+
+  // Add regularization to minimize the length of the line segment contour.
+  for (int i = 0; i < num_segments; ++i) {
+    problem.AddResidualBlock(
+        EuclideanDistanceFunctor::Create(sqrt(regularization_weight)),
+        NULL,
+        X.data() + 2 * i,
+        X.data() + 2 * ((i + 1) % num_segments));
+  }
+
+  Solver::Options options;
+  options.max_num_iterations = 100;
+  options.linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+
+  // Save the initial state, then solve `X` and `t` jointly with
+  // dynamic_sparsity = false.
+  Matrix X0 = X;
+  Vector t0 = t;
+  options.dynamic_sparsity = false;
+  Solver::Summary static_summary;
+  Solve(options, &problem, &static_summary);
+  EXPECT_EQ(static_summary.termination_type, CONVERGENCE)
+      << static_summary.FullReport();
+
+  X = X0;
+  t = t0;
+  options.dynamic_sparsity = true;
+  Solver::Summary dynamic_summary;
+  Solve(options, &problem, &dynamic_summary);
+  EXPECT_EQ(dynamic_summary.termination_type, CONVERGENCE)
+      << dynamic_summary.FullReport();
+
+  EXPECT_NEAR(static_summary.final_cost,
+              dynamic_summary.final_cost,
+              std::numeric_limits<double>::epsilon())
+      << "Static: \n"
+      << static_summary.FullReport() << "\nDynamic: \n"
+      << dynamic_summary.FullReport();
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/eigensparse.cc b/internal/ceres/eigensparse.cc
new file mode 100644
index 0000000..9847bfd
--- /dev/null
+++ b/internal/ceres/eigensparse.cc
@@ -0,0 +1,207 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/eigensparse.h"
+
+#ifdef CERES_USE_EIGEN_SPARSE
+
+#include <sstream>
+#include <type_traits>
+#include "Eigen/SparseCholesky"
+#include "Eigen/SparseCore"
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/linear_solver.h"
+
+namespace ceres {
+namespace internal {
+
+// TODO(sameeragarwal): Use enable_if to clean up the implementations
+// for when Scalar == double.
+template <typename Solver>
+class EigenSparseCholeskyTemplate : public SparseCholesky {
+ public:
+  EigenSparseCholeskyTemplate() : analyzed_(false) {}
+  virtual ~EigenSparseCholeskyTemplate() {}
+  virtual CompressedRowSparseMatrix::StorageType StorageType() const {
+    return CompressedRowSparseMatrix::LOWER_TRIANGULAR;
+  }
+
+  virtual LinearSolverTerminationType Factorize(
+      const Eigen::SparseMatrix<typename Solver::Scalar>& lhs,
+      std::string* message) {
+    if (!analyzed_) {
+      solver_.analyzePattern(lhs);
+
+      if (VLOG_IS_ON(2)) {
+        std::stringstream ss;
+        solver_.dumpMemory(ss);
+        VLOG(2) << "Symbolic Analysis\n" << ss.str();
+      }
+
+      if (solver_.info() != Eigen::Success) {
+        *message = "Eigen failure. Unable to find symbolic factorization.";
+        return LINEAR_SOLVER_FATAL_ERROR;
+      }
+
+      analyzed_ = true;
+    }
+
+    solver_.factorize(lhs);
+    if (solver_.info() != Eigen::Success) {
+      *message = "Eigen failure. Unable to find numeric factorization.";
+      return LINEAR_SOLVER_FAILURE;
+    }
+    return LINEAR_SOLVER_SUCCESS;
+  }
+
+  LinearSolverTerminationType Solve(const double* rhs_ptr,
+                                    double* solution_ptr,
+                                    std::string* message) {
+    CHECK(analyzed_) << "Solve called without a call to Factorize first.";
+
+    scalar_rhs_ = ConstVectorRef(rhs_ptr, solver_.cols())
+                      .template cast<typename Solver::Scalar>();
+
+    // The two casts are needed if the Scalar in this class is not
+    // double. For code simplicity we are going to assume that Eigen
+    // is smart enough to figure out that casting a double Vector to a
+    // double Vector is a straight copy. If this turns into a
+    // performance bottleneck (unlikely), we can revisit this.
+    scalar_solution_ = solver_.solve(scalar_rhs_);
+    VectorRef(solution_ptr, solver_.cols()) =
+        scalar_solution_.template cast<double>();
+
+    if (solver_.info() != Eigen::Success) {
+      *message = "Eigen failure. Unable to do triangular solve.";
+      return LINEAR_SOLVER_FAILURE;
+    }
+    return LINEAR_SOLVER_SUCCESS;
+  }
+
+  virtual LinearSolverTerminationType Factorize(CompressedRowSparseMatrix* lhs,
+                                                std::string* message) {
+    CHECK_EQ(lhs->storage_type(), StorageType());
+
+    typename Solver::Scalar* values_ptr = NULL;
+    if (std::is_same<typename Solver::Scalar, double>::value) {
+      values_ptr =
+          reinterpret_cast<typename Solver::Scalar*>(lhs->mutable_values());
+    } else {
+      // The scalar used by this class is not double, so make a copy
+      // of the values array in the CompressedRowSparseMatrix and cast
+      // it to Scalar along the way.
+      values_ = ConstVectorRef(lhs->values(), lhs->num_nonzeros())
+                    .cast<typename Solver::Scalar>();
+      values_ptr = values_.data();
+    }
+
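+    // The lhs is a lower triangular CompressedRowSparseMatrix. Mapping
+    // its row/col/value arrays as a column major matrix (without any
+    // copying) yields its transpose, i.e. the upper triangular part,
+    // which is why the Eigen solvers instantiated in Create() use
+    // Eigen::Upper.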
+    Eigen::MappedSparseMatrix<typename Solver::Scalar, Eigen::ColMajor>
+        eigen_lhs(lhs->num_rows(),
+                  lhs->num_rows(),
+                  lhs->num_nonzeros(),
+                  lhs->mutable_rows(),
+                  lhs->mutable_cols(),
+                  values_ptr);
+    return Factorize(eigen_lhs, message);
+  }
+
+ private:
+  Eigen::Matrix<typename Solver::Scalar, Eigen::Dynamic, 1> values_,
+      scalar_rhs_, scalar_solution_;
+  bool analyzed_;
+  Solver solver_;
+};
+
+std::unique_ptr<SparseCholesky> EigenSparseCholesky::Create(
+    const OrderingType ordering_type) {
+  std::unique_ptr<SparseCholesky> sparse_cholesky;
+
+  // The preprocessor gymnastics here are dealing with the fact that
+  // before version 3.2.2, Eigen did not support a third template
+  // parameter to specify the ordering and it always defaults to AMD.
+#if EIGEN_VERSION_AT_LEAST(3, 2, 2)
+  typedef Eigen::SimplicialLDLT<Eigen::SparseMatrix<double>,
+                                Eigen::Upper,
+                                Eigen::AMDOrdering<int>>
+      WithAMDOrdering;
+  typedef Eigen::SimplicialLDLT<Eigen::SparseMatrix<double>,
+                                Eigen::Upper,
+                                Eigen::NaturalOrdering<int>>
+      WithNaturalOrdering;
+  if (ordering_type == AMD) {
+    sparse_cholesky.reset(new EigenSparseCholeskyTemplate<WithAMDOrdering>());
+  } else {
+    sparse_cholesky.reset(
+        new EigenSparseCholeskyTemplate<WithNaturalOrdering>());
+  }
+#else
+  typedef Eigen::SimplicialLDLT<Eigen::SparseMatrix<double>, Eigen::Upper>
+      WithAMDOrdering;
+  sparse_cholesky.reset(new EigenSparseCholeskyTemplate<WithAMDOrdering>());
+#endif
+  return sparse_cholesky;
+}
+
+EigenSparseCholesky::~EigenSparseCholesky() {}
+
+std::unique_ptr<SparseCholesky> FloatEigenSparseCholesky::Create(
+    const OrderingType ordering_type) {
+  std::unique_ptr<SparseCholesky> sparse_cholesky;
+  // The preprocessor gymnastics here are dealing with the fact that
+  // before version 3.2.2, Eigen did not support a third template
+  // parameter to specify the ordering and it always defaults to AMD.
+#if EIGEN_VERSION_AT_LEAST(3, 2, 2)
+  typedef Eigen::SimplicialLDLT<Eigen::SparseMatrix<float>,
+                                Eigen::Upper,
+                                Eigen::AMDOrdering<int>>
+      WithAMDOrdering;
+  typedef Eigen::SimplicialLDLT<Eigen::SparseMatrix<float>,
+                                Eigen::Upper,
+                                Eigen::NaturalOrdering<int>>
+      WithNaturalOrdering;
+  if (ordering_type == AMD) {
+    sparse_cholesky.reset(new EigenSparseCholeskyTemplate<WithAMDOrdering>());
+  } else {
+    sparse_cholesky.reset(
+        new EigenSparseCholeskyTemplate<WithNaturalOrdering>());
+  }
+#else
+  typedef Eigen::SimplicialLDLT<Eigen::SparseMatrix<float>, Eigen::Upper>
+      WithAMDOrdering;
+  sparse_cholesky.reset(new EigenSparseCholeskyTemplate<WithAMDOrdering>());
+#endif
+  return sparse_cholesky;
+}
+
+FloatEigenSparseCholesky::~FloatEigenSparseCholesky() {}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_USE_EIGEN_SPARSE
diff --git a/internal/ceres/eigensparse.h b/internal/ceres/eigensparse.h
new file mode 100644
index 0000000..2e6c6f0
--- /dev/null
+++ b/internal/ceres/eigensparse.h
@@ -0,0 +1,90 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// A simple C++ interface to the Eigen's Sparse Cholesky routines.
+
+#ifndef CERES_INTERNAL_EIGENSPARSE_H_
+#define CERES_INTERNAL_EIGENSPARSE_H_
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifdef CERES_USE_EIGEN_SPARSE
+
+#include <memory>
+#include <string>
+
+#include "Eigen/SparseCore"
+#include "ceres/linear_solver.h"
+#include "ceres/sparse_cholesky.h"
+
+namespace ceres {
+namespace internal {
+
+class EigenSparseCholesky : public SparseCholesky {
+ public:
+  // Factory
+  static std::unique_ptr<SparseCholesky> Create(
+      const OrderingType ordering_type);
+
+  // SparseCholesky interface.
+  virtual ~EigenSparseCholesky();
+  virtual LinearSolverTerminationType Factorize(
+      CompressedRowSparseMatrix* lhs, std::string* message) = 0;
+  virtual CompressedRowSparseMatrix::StorageType StorageType() const = 0;
+  virtual LinearSolverTerminationType Solve(const double* rhs,
+                                            double* solution,
+                                            std::string* message) = 0;
+};
+
+// Even though the input is a double precision linear system, this class
+// solves it by computing a single precision Cholesky factorization.
+class FloatEigenSparseCholesky : public SparseCholesky {
+ public:
+  // Factory
+  static std::unique_ptr<SparseCholesky> Create(
+      const OrderingType ordering_type);
+
+  // SparseCholesky interface.
+  virtual ~FloatEigenSparseCholesky();
+  virtual LinearSolverTerminationType Factorize(
+      CompressedRowSparseMatrix* lhs, std::string* message) = 0;
+  virtual CompressedRowSparseMatrix::StorageType StorageType() const = 0;
+  virtual LinearSolverTerminationType Solve(const double* rhs,
+                                            double* solution,
+                                            std::string* message) = 0;
+};
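+
+// Illustrative note (editorial sketch): FloatEigenSparseCholesky is used
+// exactly like EigenSparseCholesky, e.g.
+//
+//   std::unique_ptr<SparseCholesky> cholesky =
+//       FloatEigenSparseCholesky::Create(AMD);
+//
+// The double precision values of the input matrix are cast to float before
+// factorization, which typically trades some accuracy for a smaller and
+// faster factorization.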
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_USE_EIGEN_SPARSE
+
+#endif  // CERES_INTERNAL_EIGENSPARSE_H_
diff --git a/internal/ceres/evaluation_callback_test.cc b/internal/ceres/evaluation_callback_test.cc
new file mode 100644
index 0000000..a28d5a8
--- /dev/null
+++ b/internal/ceres/evaluation_callback_test.cc
@@ -0,0 +1,312 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: mierle@gmail.com (Keir Mierle)
+
+#include "ceres/solver.h"
+
+#include <cmath>
+#include <limits>
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "ceres/sized_cost_function.h"
+#include "ceres/problem.h"
+#include "ceres/problem_impl.h"
+
+namespace ceres {
+namespace internal {
+
+// Use an inline hash function to avoid portability wrangling. Algorithm from
+// Daniel Bernstein, known as the "djb2" hash.
+template<typename T>
+uint64_t Djb2Hash(const T* data, const int size) {
+  uint64_t hash = 5381;
+  const uint8_t* data_as_bytes = reinterpret_cast<const uint8_t*>(data);
+  for (size_t i = 0; i < sizeof(*data) * size; ++i) {
+    hash = hash * 33 + data_as_bytes[i];
+  }
+  return hash;
+}
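+
+// The hash covers the raw bytes of the array, so any bit-level change to the
+// parameter values changes the hash. For example (illustrative):
+//
+//   double parameters[2] = {1.0, 2.0};
+//   const uint64_t hash = Djb2Hash(parameters, 2);  // Hashes 16 bytes.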
+
+const double kUninitialized = 0;
+
+// Generally multiple inheritance is a terrible idea, but in this (test)
+// case it makes for a relatively elegant test implementation.
+struct WigglyBowlCostFunctionAndEvaluationCallback :
+      SizedCostFunction<2, 2>,
+      EvaluationCallback  {
+
+  explicit WigglyBowlCostFunctionAndEvaluationCallback(double *parameter)
+      : EvaluationCallback(),
+        user_parameter_block(parameter),
+        prepare_num_calls(0),
+        prepare_requested_jacobians(false),
+        prepare_new_evaluation_point(false),
+        prepare_parameter_hash(kUninitialized),
+        evaluate_num_calls(0),
+        evaluate_last_parameter_hash(kUninitialized) {}
+
+  virtual ~WigglyBowlCostFunctionAndEvaluationCallback() {}
+
+  // Evaluation callback interface. This checks that all the preconditions are
+  // met at the point that Ceres calls into it.
+  virtual void PrepareForEvaluation(bool evaluate_jacobians,
+                                    bool new_evaluation_point) {
+    // At this point, Ceres has already pushed the incoming parameters into
+    // the user parameter blocks (in contrast to Evaluate(), which receives
+    // them explicitly as arguments).
+    uint64_t incoming_parameter_hash = Djb2Hash(user_parameter_block, 2);
+
+    // Check: PrepareForEvaluation() & Evaluate() come in pairs, in that
+    // order, so excluding this call the two call counts should match.
+    EXPECT_EQ(prepare_num_calls, evaluate_num_calls);
+
+    // Check: new_evaluation_point indicates that the parameter has changed.
+    if (new_evaluation_point) {
+      // If it's a new evaluation point, the parameters should have changed.
+      // Strictly speaking they are not required to change, but in practice
+      // they do, and that helps with testing.
+      EXPECT_NE(evaluate_last_parameter_hash, incoming_parameter_hash);
+      EXPECT_NE(prepare_parameter_hash, incoming_parameter_hash);
+    } else {
+      // If this is the same evaluation point as last time, ensure that the
+      // parameters match across the previous Evaluate(), the previous
+      // PrepareForEvaluation(), and this call.
+      EXPECT_EQ(evaluate_last_parameter_hash, prepare_parameter_hash);
+      EXPECT_EQ(evaluate_last_parameter_hash, incoming_parameter_hash);
+    }
+
+    // Save details to check at the next call to Evaluate().
+    prepare_num_calls++;
+    prepare_requested_jacobians = evaluate_jacobians;
+    prepare_new_evaluation_point = new_evaluation_point;
+    prepare_parameter_hash = incoming_parameter_hash;
+  }
+
+  // Cost function interface. This checks that the preconditions set up in the
+  // PrepareForEvaluation() call are met in this call.
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** jacobians) const {
+    // Cost function implementation of the "Wiggly Bowl" function:
+    //
+    //   1/2 * [(y - a*sin(x))^2 + x^2],
+    //
+    // expressed as a Ceres cost function with two residuals:
+    //
+    //   r[0] = y - a*sin(x)
+    //   r[1] = x.
+    //
+    // This is harder to optimize than the Rosenbrock function because the
+    // minimizer has to navigate a sine-shaped valley while descending the 1D
+    // parabola formed along the y axis. Note that the "a" needs to be more
+    // than 5 to get a strong enough wiggle effect in the cost surface to
+    // trigger failed iterations in the optimizer.
+    const double a = 10.0;
+    double x = (*parameters)[0];
+    double y = (*parameters)[1];
+    residuals[0] = y - a * sin(x);
+    residuals[1] = x;
+    if (jacobians != NULL) {
+      (*jacobians)[2 * 0 + 0] = - a * cos(x);  // df1/dx
+      (*jacobians)[2 * 0 + 1] = 1.0;           // df1/dy
+      (*jacobians)[2 * 1 + 0] = 1.0;           // df2/dx
+      (*jacobians)[2 * 1 + 1] = 0.0;           // df2/dy
+    }
+
+    uint64_t incoming_parameter_hash = Djb2Hash(*parameters, 2);
+
+    // Check: PrepareForEvaluation() & Evaluate() come in pairs, in that order.
+    EXPECT_EQ(prepare_num_calls, evaluate_num_calls + 1);
+
+    // Check: if new_evaluation_point indicates that the parameter has
+    // changed, it has changed; otherwise it is the same.
+    if (prepare_new_evaluation_point) {
+      EXPECT_NE(evaluate_last_parameter_hash, incoming_parameter_hash);
+    } else {
+      EXPECT_NE(evaluate_last_parameter_hash, kUninitialized);
+      EXPECT_EQ(evaluate_last_parameter_hash, incoming_parameter_hash);
+    }
+
+    // Check: the parameters match the values seen in the parameter blocks
+    // during PrepareForEvaluation().
+    EXPECT_EQ(prepare_parameter_hash, incoming_parameter_hash);
+
+    // Check: jacobians are requested exactly when they were requested in
+    // PrepareForEvaluation().
+    EXPECT_EQ(prepare_requested_jacobians, jacobians != NULL);
+
+    evaluate_num_calls++;
+    evaluate_last_parameter_hash = incoming_parameter_hash;
+    return true;
+  }
+
+  // Pointer to the parameter block associated with this cost function.
+  // Contents should get set by Ceres before calls to PrepareForEvaluation()
+  // and Evaluate().
+  double* user_parameter_block;
+
+  // Track state: PrepareForEvaluation().
+  //
+  // These track details from the PrepareForEvaluation() call (hence the
+  // "prepare_" prefix), which are checked for consistency in Evaluate().
+  int prepare_num_calls;
+  bool prepare_requested_jacobians;
+  bool prepare_new_evaluation_point;
+  uint64_t prepare_parameter_hash;
+
+  // Track state: Evaluate().
+  //
+  // These track details from the Evaluate() call (hence the "evaluate_"
+  // prefix), which are then checked for consistency in the calls to
+  // PrepareForEvaluation(). Mutable is reasonable for this case.
+  mutable int evaluate_num_calls;
+  mutable uint64_t evaluate_last_parameter_hash;
+};
+
+TEST(EvaluationCallback, WithTrustRegionMinimizer) {
+  double parameters[2] = {50.0, 50.0};
+  const uint64_t original_parameters_hash = Djb2Hash(parameters, 2);
+
+  WigglyBowlCostFunctionAndEvaluationCallback cost_function(parameters);
+  Problem::Options problem_options;
+  problem_options.cost_function_ownership = DO_NOT_TAKE_OWNERSHIP;
+  Problem problem(problem_options);
+  problem.AddResidualBlock(&cost_function, NULL, parameters);
+
+  Solver::Options options;
+  options.linear_solver_type = DENSE_QR;
+  options.max_num_iterations = 300;  // Cost function is hard.
+  options.evaluation_callback = &cost_function;
+
+  // Run the solve. Checking is done inside the cost function / callback.
+  Solver::Summary summary;
+  Solve(options, &problem, &summary);
+
+  // Ensure that this was a hard cost function (not all steps succeed).
+  EXPECT_GT(summary.num_successful_steps, 10);
+  EXPECT_GT(summary.num_unsuccessful_steps, 10);
+
+  // Ensure PrepareForEvaluation() is called the appropriate number of times.
+  EXPECT_EQ(cost_function.prepare_num_calls,
+            // Unsuccessful steps are evaluated only once (no jacobians).
+            summary.num_unsuccessful_steps +
+            // Successful steps are evaluated twice: with and without jacobians.
+            2 * summary.num_successful_steps
+            // Final iteration doesn't re-evaluate the jacobian.
+            // Note: This may be sensitive to tweaks to the TR algorithm; if
+            // this becomes too brittle, remove this EXPECT_EQ() entirely.
+            - 1);
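+
+  // Worked example with illustrative numbers: 15 unsuccessful and 20
+  // successful steps would give 15 + 2 * 20 - 1 = 54 expected calls.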
+
+  // Ensure the callback calls ran a reasonable number of times.
+  EXPECT_GT(cost_function.prepare_num_calls, 0);
+  EXPECT_GT(cost_function.evaluate_num_calls, 0);
+  EXPECT_EQ(cost_function.prepare_num_calls,
+            cost_function.evaluate_num_calls);
+
+  // Ensure that the parameters did actually change.
+  EXPECT_NE(Djb2Hash(parameters, 2), original_parameters_hash);
+}
+
+void WithLineSearchMinimizerImpl(
+    LineSearchType line_search,
+    LineSearchDirectionType line_search_direction,
+    LineSearchInterpolationType line_search_interpolation) {
+  double parameters[2] = {50.0, 50.0};
+  const uint64_t original_parameters_hash = Djb2Hash(parameters, 2);
+
+  WigglyBowlCostFunctionAndEvaluationCallback cost_function(parameters);
+  Problem::Options problem_options;
+  problem_options.cost_function_ownership = DO_NOT_TAKE_OWNERSHIP;
+  Problem problem(problem_options);
+  problem.AddResidualBlock(&cost_function, NULL, parameters);
+
+  Solver::Options options;
+  options.linear_solver_type = DENSE_QR;
+  options.max_num_iterations = 300;  // Cost function is hard.
+  options.minimizer_type = ceres::LINE_SEARCH;
+  options.evaluation_callback = &cost_function;
+  options.line_search_type = line_search;
+  options.line_search_direction_type = line_search_direction;
+  options.line_search_interpolation_type = line_search_interpolation;
+
+  // Run the solve. Checking is done inside the cost function / callback.
+  Solver::Summary summary;
+  Solve(options, &problem, &summary);
+
+  // Ensure the callback calls ran a reasonable number of times.
+  EXPECT_GT(summary.num_line_search_steps, 10);
+  EXPECT_GT(cost_function.prepare_num_calls, 30);
+  EXPECT_EQ(cost_function.prepare_num_calls,
+            cost_function.evaluate_num_calls);
+
+  // Ensure that the parameters did actually change.
+  EXPECT_NE(Djb2Hash(parameters, 2), original_parameters_hash);
+}
+
+// Note: These tests omit combinations of Wolfe line search with bisection.
+// Due to an implementation quirk in Wolfe line search with bisection, there
+// are calls that re-evaluate an existing point with new_evaluation_point =
+// true. That breaks these (overly) strict tests, since they check the
+// new_evaluation_point precondition in an if-and-only-if way. Strictly
+// speaking, new_evaluation_point = true does not *require* that the point has
+// changed; only that if new_evaluation_point = false, the same point is
+// reused.
+//
+// Since the strict checking is useful to verify that there aren't missed
+// optimizations, omit tests of the Wolfe with bisection cases.
+
+// Wolfe with L-BFGS.
+TEST(EvaluationCallback, WithLineSearchMinimizerWolfeLbfgsCubic) {
+  WithLineSearchMinimizerImpl(WOLFE, LBFGS, CUBIC);
+}
+TEST(EvaluationCallback, WithLineSearchMinimizerWolfeLbfgsQuadratic) {
+  WithLineSearchMinimizerImpl(WOLFE, LBFGS, QUADRATIC);
+}
+
+// Wolfe with full BFGS.
+TEST(EvaluationCallback, WithLineSearchMinimizerWolfeBfgsCubic) {
+  WithLineSearchMinimizerImpl(WOLFE, BFGS, CUBIC);
+}
+
+TEST(EvaluationCallback, WithLineSearchMinimizerWolfeBfgsQuadratic) {
+  WithLineSearchMinimizerImpl(WOLFE, BFGS, QUADRATIC);
+}
+
+// Armijo with nonlinear conjugate gradient.
+TEST(EvaluationCallback, WithLineSearchMinimizerArmijoCubic) {
+  WithLineSearchMinimizerImpl(ARMIJO, NONLINEAR_CONJUGATE_GRADIENT, CUBIC);
+}
+
+TEST(EvaluationCallback, WithLineSearchMinimizerArmijoBisection) {
+  WithLineSearchMinimizerImpl(ARMIJO, NONLINEAR_CONJUGATE_GRADIENT, BISECTION);
+}
+
+TEST(EvaluationCallback, WithLineSearchMinimizerArmijoQuadratic) {
+  WithLineSearchMinimizerImpl(ARMIJO, NONLINEAR_CONJUGATE_GRADIENT, QUADRATIC);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/evaluator.cc b/internal/ceres/evaluator.cc
new file mode 100644
index 0000000..8387983
--- /dev/null
+++ b/internal/ceres/evaluator.cc
@@ -0,0 +1,88 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include <vector>
+#include "ceres/block_evaluate_preparer.h"
+#include "ceres/block_jacobian_writer.h"
+#include "ceres/compressed_row_jacobian_writer.h"
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/crs_matrix.h"
+#include "ceres/dense_jacobian_writer.h"
+#include "ceres/dynamic_compressed_row_finalizer.h"
+#include "ceres/dynamic_compressed_row_jacobian_writer.h"
+#include "ceres/evaluator.h"
+#include "ceres/internal/port.h"
+#include "ceres/program_evaluator.h"
+#include "ceres/scratch_evaluate_preparer.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+Evaluator::~Evaluator() {}
+
+Evaluator* Evaluator::Create(const Evaluator::Options& options,
+                             Program* program,
+                             std::string* error) {
+  CHECK(options.context != NULL);
+
+  switch (options.linear_solver_type) {
+    case DENSE_QR:
+    case DENSE_NORMAL_CHOLESKY:
+      return new ProgramEvaluator<ScratchEvaluatePreparer,
+                                  DenseJacobianWriter>(options,
+                                                       program);
+    case DENSE_SCHUR:
+    case SPARSE_SCHUR:
+    case ITERATIVE_SCHUR:
+    case CGNR:
+      return new ProgramEvaluator<BlockEvaluatePreparer,
+                                  BlockJacobianWriter>(options,
+                                                       program);
+    case SPARSE_NORMAL_CHOLESKY:
+      if (options.dynamic_sparsity) {
+        return new ProgramEvaluator<ScratchEvaluatePreparer,
+                                    DynamicCompressedRowJacobianWriter,
+                                    DynamicCompressedRowJacobianFinalizer>(
+                                        options, program);
+      } else {
+        return new ProgramEvaluator<BlockEvaluatePreparer,
+                                    BlockJacobianWriter>(options,
+                                                         program);
+      }
+
+    default:
+      *error = "Invalid Linear Solver Type. Unable to create evaluator.";
+      return NULL;
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/evaluator.h b/internal/ceres/evaluator.h
new file mode 100644
index 0000000..b820958
--- /dev/null
+++ b/internal/ceres/evaluator.h
@@ -0,0 +1,172 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//         keir@google.com (Keir Mierle)
+
+#ifndef CERES_INTERNAL_EVALUATOR_H_
+#define CERES_INTERNAL_EVALUATOR_H_
+
+#include <map>
+#include <string>
+#include <vector>
+
+#include "ceres/context_impl.h"
+#include "ceres/execution_summary.h"
+#include "ceres/internal/port.h"
+#include "ceres/types.h"
+
+namespace ceres {
+
+struct CRSMatrix;
+class EvaluationCallback;
+
+namespace internal {
+
+class Program;
+class SparseMatrix;
+
+// The Evaluator interface offers a way to interact with a least squares cost
+// function that is useful for an optimizer that wants to minimize the least
+// squares objective. This insulates the optimizer from issues like Jacobian
+// storage, parameterization, etc.
+class Evaluator {
+ public:
+  virtual ~Evaluator();
+
+  struct Options {
+    int num_threads = 1;
+    int num_eliminate_blocks = -1;
+    LinearSolverType linear_solver_type = DENSE_QR;
+    bool dynamic_sparsity = false;
+    ContextImpl* context = nullptr;
+    EvaluationCallback* evaluation_callback = nullptr;
+  };
+
+  static Evaluator* Create(const Options& options,
+                           Program* program,
+                           std::string* error);
+
+  // Build and return a sparse matrix for storing and working with the Jacobian
+  // of the objective function. The jacobian has dimensions
+  // NumEffectiveParameters() by NumParameters(), and is typically extremely
+  // sparse. Since the sparsity pattern of the Jacobian remains constant over
+  // the lifetime of the optimization problem, this method is used to
+  // instantiate a SparseMatrix object with the appropriate sparsity structure
+  // (which can be an expensive operation) and then reused by the optimization
+  // algorithm and the various linear solvers.
+  //
+  // It is expected that the classes implementing this interface will be aware
+  // of their client's requirements for the kind of sparse matrix storage and
+  // layout that is needed for an efficient implementation. For example
+  // CompressedRowOptimizationProblem creates a compressed row representation of
+  // the jacobian for use with CHOLMOD, whereas BlockOptimizationProblem
+  // creates a BlockSparseMatrix representation of the jacobian for use in the
+  // Schur complement based methods.
+  virtual SparseMatrix* CreateJacobian() const = 0;
+
+  // Options struct to control Evaluator::Evaluate.
+  struct EvaluateOptions {
+    // If false, the loss function correction is not applied to the
+    // residual blocks.
+    bool apply_loss_function = true;
+
+    // If false, this evaluation point is the same as the last one.
+    bool new_evaluation_point = true;
+  };
+
+  // Evaluate the cost function for the given state. Returns the cost,
+  // residuals, and jacobian in the corresponding arguments. Both residuals and
+  // jacobian are optional; to avoid computing them, pass NULL.
+  //
+  // If non-NULL, the Jacobian must have a suitable sparsity pattern; only the
+  // values array of the jacobian is modified.
+  //
+  // state is an array of size NumParameters(), cost is a pointer to a single
+  // double, and residuals is an array of doubles of size NumResiduals().
+  virtual bool Evaluate(const EvaluateOptions& evaluate_options,
+                        const double* state,
+                        double* cost,
+                        double* residuals,
+                        double* gradient,
+                        SparseMatrix* jacobian) = 0;
+
+  // Variant of Evaluator::Evaluate where the user wishes to use the
+  // default EvaluateOptions struct. This is mostly here as a
+  // convenience method.
+  bool Evaluate(const double* state,
+                double* cost,
+                double* residuals,
+                double* gradient,
+                SparseMatrix* jacobian) {
+    return Evaluate(EvaluateOptions(),
+                    state,
+                    cost,
+                    residuals,
+                    gradient,
+                    jacobian);
+  }
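+
+  // Illustrative calling pattern (editorial sketch; |program|, |context|,
+  // |state|, |residuals| and |gradient| are hypothetical and sized as
+  // documented above):
+  //
+  //   Evaluator::Options options;
+  //   options.linear_solver_type = DENSE_QR;
+  //   options.context = context;
+  //   std::string error;
+  //   std::unique_ptr<Evaluator> evaluator(
+  //       Evaluator::Create(options, program, &error));
+  //   std::unique_ptr<SparseMatrix> jacobian(evaluator->CreateJacobian());
+  //   double cost;
+  //   evaluator->Evaluate(state, &cost, residuals, gradient, jacobian.get());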
+
+  // Make a change delta (of size NumEffectiveParameters()) to state (of size
+  // NumParameters()) and store the result in state_plus_delta.
+  //
+  // When no local parameterizations are used, this is equivalent to
+  //
+  //   state_plus_delta[i] = state[i] + delta[i];
+  //
+  // however, the mapping is more complicated in the case of parameterizations
+  // like quaternions. This is the same as the "Plus()" operation in
+  // local_parameterization.h, but operating over the entire state vector for a
+  // problem.
+  virtual bool Plus(const double* state,
+                    const double* delta,
+                    double* state_plus_delta) const = 0;
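+
+  // Illustrative sketch (editorial; assumes only purely Euclidean parameter
+  // blocks, so NumParameters() == NumEffectiveParameters()):
+  //
+  //   std::vector<double> state_plus_delta(evaluator->NumParameters());
+  //   evaluator->Plus(state, delta, state_plus_delta.data());
+  //   // state_plus_delta[i] == state[i] + delta[i] in this special case.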
+
+  // The number of parameters in the optimization problem.
+  virtual int NumParameters() const = 0;
+
+  // The effective number of parameters that the optimizer may adjust. This
+  // differs from NumParameters() when local parameterizations are used on
+  // some of the parameter blocks.
+  virtual int NumEffectiveParameters() const = 0;
+
+  // The number of residuals in the optimization problem.
+  virtual int NumResiduals() const = 0;
+
+  // The following method returns a copy instead of a reference so that the
+  // base class implementation does not have to worry about lifetime issues.
+  // Further, this call is not expected to be frequent or performance
+  // sensitive.
+  virtual std::map<std::string, CallStatistics> Statistics() const {
+    return std::map<std::string, CallStatistics>();
+  }
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_EVALUATOR_H_
diff --git a/internal/ceres/evaluator_test.cc b/internal/ceres/evaluator_test.cc
new file mode 100644
index 0000000..a156b89
--- /dev/null
+++ b/internal/ceres/evaluator_test.cc
@@ -0,0 +1,677 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// Tests shared across evaluators. The tests try all combinations of linear
+// solver and num_eliminate_blocks (for schur-based solvers).
+
+#include "ceres/evaluator.h"
+
+#include <memory>
+#include "ceres/casts.h"
+#include "ceres/cost_function.h"
+#include "ceres/crs_matrix.h"
+#include "ceres/evaluator_test_utils.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/local_parameterization.h"
+#include "ceres/problem_impl.h"
+#include "ceres/program.h"
+#include "ceres/sized_cost_function.h"
+#include "ceres/sparse_matrix.h"
+#include "ceres/stringprintf.h"
+#include "ceres/types.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+using std::string;
+using std::vector;
+
+// TODO(keir): Consider pushing this into a common test utils file.
+template <int kFactor, int kNumResiduals, int... Ns>
+class ParameterIgnoringCostFunction
+    : public SizedCostFunction<kNumResiduals, Ns...> {
+  typedef SizedCostFunction<kNumResiduals, Ns...> Base;
+
+ public:
+  explicit ParameterIgnoringCostFunction(bool succeeds = true)
+      : succeeds_(succeeds) {}
+
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** jacobians) const {
+    for (int i = 0; i < Base::num_residuals(); ++i) {
+      residuals[i] = i + 1;
+    }
+    if (jacobians) {
+      for (int k = 0; k < Base::parameter_block_sizes().size(); ++k) {
+        // The jacobians here are full sized, but they are transformed in the
+        // evaluator into the "local" jacobian. In the tests, the "subset
+        // constant" parameterization is used, which should pick out columns
+        // from these jacobians. Put values in the jacobian that make this
+        // obvious; in particular, make the jacobians like this:
+        //
+        //   1 2 3 4 ...
+        //   1 2 3 4 ...   .*  kFactor
+        //   1 2 3 4 ...
+        //
+        // where the multiplication by kFactor makes it easier to distinguish
+        // between Jacobians of different residuals for the same parameter.
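+        //
+        // For example, with kFactor = 2 and a parameter block of size 3,
+        // every row of that block's Jacobian is [2, 4, 6].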
+        if (jacobians[k] != nullptr) {
+          MatrixRef jacobian(jacobians[k],
+                             Base::num_residuals(),
+                             Base::parameter_block_sizes()[k]);
+          for (int j = 0; j < Base::parameter_block_sizes()[k]; ++j) {
+            jacobian.col(j).setConstant(kFactor * (j + 1));
+          }
+        }
+      }
+    }
+    return succeeds_;
+  }
+
+ private:
+  bool succeeds_;
+};
+
+struct EvaluatorTestOptions {
+  EvaluatorTestOptions(LinearSolverType linear_solver_type,
+                       int num_eliminate_blocks,
+                       bool dynamic_sparsity = false)
+    : linear_solver_type(linear_solver_type),
+      num_eliminate_blocks(num_eliminate_blocks),
+      dynamic_sparsity(dynamic_sparsity) {}
+
+  LinearSolverType linear_solver_type;
+  int num_eliminate_blocks;
+  bool dynamic_sparsity;
+};
+
+struct EvaluatorTest
+    : public ::testing::TestWithParam<EvaluatorTestOptions> {
+  Evaluator* CreateEvaluator(Program* program) {
+    // This program is straight from the ProblemImpl, and so has no index/offset
+    // yet; compute it here as required by the evaluator implementations.
+    program->SetParameterOffsetsAndIndex();
+
+    if (VLOG_IS_ON(1)) {
+      string report;
+      StringAppendF(&report, "Creating evaluator with type: %d",
+                    GetParam().linear_solver_type);
+      if (GetParam().linear_solver_type == SPARSE_NORMAL_CHOLESKY) {
+        StringAppendF(&report, ", dynamic_sparsity: %d",
+                      GetParam().dynamic_sparsity);
+      }
+      StringAppendF(&report, " and num_eliminate_blocks: %d",
+                    GetParam().num_eliminate_blocks);
+      VLOG(1) << report;
+    }
+    Evaluator::Options options;
+    options.linear_solver_type = GetParam().linear_solver_type;
+    options.num_eliminate_blocks = GetParam().num_eliminate_blocks;
+    options.dynamic_sparsity = GetParam().dynamic_sparsity;
+    options.context = problem.context();
+    string error;
+    return Evaluator::Create(options, program, &error);
+  }
+
+  void EvaluateAndCompare(ProblemImpl *problem,
+                          int expected_num_rows,
+                          int expected_num_cols,
+                          double expected_cost,
+                          const double* expected_residuals,
+                          const double* expected_gradient,
+                          const double* expected_jacobian) {
+    std::unique_ptr<Evaluator> evaluator(
+        CreateEvaluator(problem->mutable_program()));
+    int num_residuals = expected_num_rows;
+    int num_parameters = expected_num_cols;
+
+    double cost = -1;
+
+    Vector residuals(num_residuals);
+    residuals.setConstant(-2000);
+
+    Vector gradient(num_parameters);
+    gradient.setConstant(-3000);
+
+    std::unique_ptr<SparseMatrix> jacobian(evaluator->CreateJacobian());
+
+    ASSERT_EQ(expected_num_rows, evaluator->NumResiduals());
+    ASSERT_EQ(expected_num_cols, evaluator->NumEffectiveParameters());
+    ASSERT_EQ(expected_num_rows, jacobian->num_rows());
+    ASSERT_EQ(expected_num_cols, jacobian->num_cols());
+
+    vector<double> state(evaluator->NumParameters());
+
+    ASSERT_TRUE(evaluator->Evaluate(
+          &state[0],
+          &cost,
+          expected_residuals != nullptr ? &residuals[0]  : nullptr,
+          expected_gradient  != nullptr ? &gradient[0]   : nullptr,
+          expected_jacobian  != nullptr ? jacobian.get() : nullptr));
+
+    Matrix actual_jacobian;
+    if (expected_jacobian != nullptr) {
+      jacobian->ToDenseMatrix(&actual_jacobian);
+    }
+
+    CompareEvaluations(expected_num_rows,
+                       expected_num_cols,
+                       expected_cost,
+                       expected_residuals,
+                       expected_gradient,
+                       expected_jacobian,
+                       cost,
+                       &residuals[0],
+                       &gradient[0],
+                       actual_jacobian.data());
+  }
+
+  // Try all 2^3 combinations of requesting residuals, gradient, and jacobian;
+  // the bits of i in the loop below select which outputs to request.
+  void CheckAllEvaluationCombinations(const ExpectedEvaluation &expected) {
+    for (int i = 0; i < 8; ++i) {
+      EvaluateAndCompare(&problem,
+                         expected.num_rows,
+                         expected.num_cols,
+                         expected.cost,
+                         (i & 1) ? expected.residuals : nullptr,
+                         (i & 2) ? expected.gradient  : nullptr,
+                         (i & 4) ? expected.jacobian  : nullptr);
+    }
+  }
+
+  // The values are ignored completely by the cost function.
+  double x[2];
+  double y[3];
+  double z[4];
+
+  ProblemImpl problem;
+};
+
+void SetSparseMatrixConstant(SparseMatrix* sparse_matrix, double value) {
+  VectorRef(sparse_matrix->mutable_values(),
+            sparse_matrix->num_nonzeros()).setConstant(value);
+}
+
+TEST_P(EvaluatorTest, SingleResidualProblem) {
+  problem.AddResidualBlock(new ParameterIgnoringCostFunction<1, 3, 2, 3, 4>,
+                           nullptr,
+                           x, y, z);
+
+  ExpectedEvaluation expected = {
+    // Rows/columns
+    3, 9,
+    // Cost
+    7.0,
+    // Residuals
+    { 1.0, 2.0, 3.0 },
+    // Gradient
+    { 6.0, 12.0,              // x
+      6.0, 12.0, 18.0,        // y
+      6.0, 12.0, 18.0, 24.0,  // z
+    },
+    // Jacobian
+    //   x          y             z
+    { 1, 2,   1, 2, 3,   1, 2, 3, 4,
+      1, 2,   1, 2, 3,   1, 2, 3, 4,
+      1, 2,   1, 2, 3,   1, 2, 3, 4
+    }
+  };
+  CheckAllEvaluationCombinations(expected);
+}
+
+TEST_P(EvaluatorTest, SingleResidualProblemWithPermutedParameters) {
+  // Add the parameters in explicit order to force the ordering in the program.
+  problem.AddParameterBlock(x,  2);
+  problem.AddParameterBlock(y,  3);
+  problem.AddParameterBlock(z,  4);
+
+  // Then use a cost function which is similar to the others, but swap around
+  // the ordering of the parameters to the cost function. This shouldn't affect
+  // the jacobian evaluation, but requires explicit handling in the evaluators.
+  // At one point the compressed row evaluator had a bug that went undetected
+  // for a long time, since by chance most users added parameters to the problem
+  // in the same order that they occurred as parameters to a cost function.
+  problem.AddResidualBlock(new ParameterIgnoringCostFunction<1, 3, 4, 3, 2>,
+                           nullptr,
+                           z, y, x);
+
+  ExpectedEvaluation expected = {
+    // Rows/columns
+    3, 9,
+    // Cost
+    7.0,
+    // Residuals
+    { 1.0, 2.0, 3.0 },
+    // Gradient
+    { 6.0, 12.0,              // x
+      6.0, 12.0, 18.0,        // y
+      6.0, 12.0, 18.0, 24.0,  // z
+    },
+    // Jacobian
+    //   x          y             z
+    { 1, 2,   1, 2, 3,   1, 2, 3, 4,
+      1, 2,   1, 2, 3,   1, 2, 3, 4,
+      1, 2,   1, 2, 3,   1, 2, 3, 4
+    }
+  };
+  CheckAllEvaluationCombinations(expected);
+}
+
+TEST_P(EvaluatorTest, SingleResidualProblemWithNuisanceParameters) {
+  // These parameters are not used.
+  double a[2];
+  double b[1];
+  double c[1];
+  double d[3];
+
+  // Add the parameters in a mixed order so the Jacobian is "checkered" with the
+  // values from the other parameters.
+  problem.AddParameterBlock(a, 2);
+  problem.AddParameterBlock(x, 2);
+  problem.AddParameterBlock(b, 1);
+  problem.AddParameterBlock(y, 3);
+  problem.AddParameterBlock(c, 1);
+  problem.AddParameterBlock(z, 4);
+  problem.AddParameterBlock(d, 3);
+
+  problem.AddResidualBlock(new ParameterIgnoringCostFunction<1, 3, 2, 3, 4>,
+                           nullptr,
+                           x, y, z);
+
+  ExpectedEvaluation expected = {
+    // Rows/columns
+    3, 16,
+    // Cost
+    7.0,
+    // Residuals
+    { 1.0, 2.0, 3.0 },
+    // Gradient
+    { 0.0, 0.0,               // a
+      6.0, 12.0,              // x
+      0.0,                    // b
+      6.0, 12.0, 18.0,        // y
+      0.0,                    // c
+      6.0, 12.0, 18.0, 24.0,  // z
+      0.0, 0.0, 0.0,          // d
+    },
+    // Jacobian
+    //   a        x     b           y     c              z           d
+    { 0, 0,    1, 2,    0,    1, 2, 3,    0,    1, 2, 3, 4,    0, 0, 0,
+      0, 0,    1, 2,    0,    1, 2, 3,    0,    1, 2, 3, 4,    0, 0, 0,
+      0, 0,    1, 2,    0,    1, 2, 3,    0,    1, 2, 3, 4,    0, 0, 0
+    }
+  };
+  CheckAllEvaluationCombinations(expected);
+}
+
+TEST_P(EvaluatorTest, MultipleResidualProblem) {
+  // Add the parameters in explicit order to force the ordering in the program.
+  problem.AddParameterBlock(x,  2);
+  problem.AddParameterBlock(y,  3);
+  problem.AddParameterBlock(z,  4);
+
+  // f(x, y) in R^2
+  problem.AddResidualBlock(new ParameterIgnoringCostFunction<1, 2, 2, 3>,
+                           nullptr,
+                           x, y);
+
+  // g(x, z) in R^3
+  problem.AddResidualBlock(new ParameterIgnoringCostFunction<2, 3, 2, 4>,
+                           nullptr,
+                           x, z);
+
+  // h(y, z) in R^4
+  problem.AddResidualBlock(new ParameterIgnoringCostFunction<3, 4, 3, 4>,
+                           nullptr,
+                           y, z);
+
+  ExpectedEvaluation expected = {
+    // Rows/columns
+    9, 9,
+    // Cost
+    // f       g           h
+    (  1 + 4 + 1 + 4 + 9 + 1 + 4 + 9 + 16) / 2.0,
+    // Residuals
+    { 1.0, 2.0,           // f
+      1.0, 2.0, 3.0,      // g
+      1.0, 2.0, 3.0, 4.0  // h
+    },
+    // Gradient
+    { 15.0, 30.0,               // x
+      33.0, 66.0, 99.0,         // y
+      42.0, 84.0, 126.0, 168.0  // z
+    },
+    // Jacobian
+    //                x        y           z
+    {   /* f(x, y) */ 1, 2,    1, 2, 3,    0, 0, 0, 0,
+                      1, 2,    1, 2, 3,    0, 0, 0, 0,
+
+        /* g(x, z) */ 2, 4,    0, 0, 0,    2, 4, 6, 8,
+                      2, 4,    0, 0, 0,    2, 4, 6, 8,
+                      2, 4,    0, 0, 0,    2, 4, 6, 8,
+
+        /* h(y, z) */ 0, 0,    3, 6, 9,    3, 6, 9, 12,
+                      0, 0,    3, 6, 9,    3, 6, 9, 12,
+                      0, 0,    3, 6, 9,    3, 6, 9, 12,
+                      0, 0,    3, 6, 9,    3, 6, 9, 12
+    }
+  };
+  CheckAllEvaluationCombinations(expected);
+}
+
+TEST_P(EvaluatorTest, MultipleResidualsWithLocalParameterizations) {
+  // Add the parameters in explicit order to force the ordering in the program.
+  problem.AddParameterBlock(x,  2);
+
+  // Fix y's first dimension.
+  vector<int> y_fixed;
+  y_fixed.push_back(0);
+  problem.AddParameterBlock(y, 3, new SubsetParameterization(3, y_fixed));
+
+  // Fix z's second dimension.
+  vector<int> z_fixed;
+  z_fixed.push_back(1);
+  problem.AddParameterBlock(z, 4, new SubsetParameterization(4, z_fixed));
+
+  // f(x, y) in R^2
+  problem.AddResidualBlock(new ParameterIgnoringCostFunction<1, 2, 2, 3>,
+                           nullptr,
+                           x, y);
+
+  // g(x, z) in R^3
+  problem.AddResidualBlock(new ParameterIgnoringCostFunction<2, 3, 2, 4>,
+                           nullptr,
+                           x, z);
+
+  // h(y, z) in R^4
+  problem.AddResidualBlock(new ParameterIgnoringCostFunction<3, 4, 3, 4>,
+                           nullptr,
+                           y, z);
+
+  ExpectedEvaluation expected = {
+    // Rows/columns
+    9, 7,
+    // Cost
+    // f       g           h
+    (  1 + 4 + 1 + 4 + 9 + 1 + 4 + 9 + 16) / 2.0,
+    // Residuals
+    { 1.0, 2.0,           // f
+      1.0, 2.0, 3.0,      // g
+      1.0, 2.0, 3.0, 4.0  // h
+    },
+    // Gradient
+    { 15.0, 30.0,         // x
+      66.0, 99.0,         // y
+      42.0, 126.0, 168.0  // z
+    },
+    // Jacobian
+    //                x        y           z
+    {   /* f(x, y) */ 1, 2,    2, 3,    0, 0, 0,
+                      1, 2,    2, 3,    0, 0, 0,
+
+        /* g(x, z) */ 2, 4,    0, 0,    2, 6, 8,
+                      2, 4,    0, 0,    2, 6, 8,
+                      2, 4,    0, 0,    2, 6, 8,
+
+        /* h(y, z) */ 0, 0,    6, 9,    3, 9, 12,
+                      0, 0,    6, 9,    3, 9, 12,
+                      0, 0,    6, 9,    3, 9, 12,
+                      0, 0,    6, 9,    3, 9, 12
+    }
+  };
+  CheckAllEvaluationCombinations(expected);
+}
+
+TEST_P(EvaluatorTest, MultipleResidualProblemWithSomeConstantParameters) {
+  // The values are ignored completely by the cost function.
+  double x[2];
+  double y[3];
+  double z[4];
+
+  // Add the parameters in explicit order to force the ordering in the program.
+  problem.AddParameterBlock(x,  2);
+  problem.AddParameterBlock(y,  3);
+  problem.AddParameterBlock(z,  4);
+
+  // f(x, y) in R^2
+  problem.AddResidualBlock(new ParameterIgnoringCostFunction<1, 2, 2, 3>,
+                           nullptr,
+                           x, y);
+
+  // g(x, z) in R^3
+  problem.AddResidualBlock(new ParameterIgnoringCostFunction<2, 3, 2, 4>,
+                           nullptr,
+                           x, z);
+
+  // h(y, z) in R^4
+  problem.AddResidualBlock(new ParameterIgnoringCostFunction<3, 4, 3, 4>,
+                           nullptr,
+                           y, z);
+
+  // For this test, "z" is constant.
+  problem.SetParameterBlockConstant(z);
+
+  // Create the reduced program which is missing the fixed "z" variable.
+  // Normally, the preprocessing of the program that happens in solver_impl
+  // takes care of this, but we don't want to invoke the solver here.
+  Program reduced_program;
+  vector<ParameterBlock*>* parameter_blocks =
+      problem.mutable_program()->mutable_parameter_blocks();
+
+  // "z" is the last parameter; save it for later and pop it off temporarily.
+  // Note that "z" will still get read during evaluation, so it cannot be
+  // deleted at this point.
+  ParameterBlock* parameter_block_z = parameter_blocks->back();
+  parameter_blocks->pop_back();
+
+  ExpectedEvaluation expected = {
+    // Rows/columns
+    9, 5,
+    // Cost
+    // f       g           h
+    (  1 + 4 + 1 + 4 + 9 + 1 + 4 + 9 + 16) / 2.0,
+    // Residuals
+    { 1.0, 2.0,           // f
+      1.0, 2.0, 3.0,      // g
+      1.0, 2.0, 3.0, 4.0  // h
+    },
+    // Gradient
+    { 15.0, 30.0,        // x
+      33.0, 66.0, 99.0,  // y
+    },
+    // Jacobian
+    //                x        y
+    {   /* f(x, y) */ 1, 2,    1, 2, 3,
+                      1, 2,    1, 2, 3,
+
+        /* g(x, z) */ 2, 4,    0, 0, 0,
+                      2, 4,    0, 0, 0,
+                      2, 4,    0, 0, 0,
+
+        /* h(y, z) */ 0, 0,    3, 6, 9,
+                      0, 0,    3, 6, 9,
+                      0, 0,    3, 6, 9,
+                      0, 0,    3, 6, 9
+    }
+  };
+  CheckAllEvaluationCombinations(expected);
+
+  // Restore parameter block z, so it will get freed in a consistent way.
+  parameter_blocks->push_back(parameter_block_z);
+}
+
+TEST_P(EvaluatorTest, EvaluatorAbortsForResidualsThatFailToEvaluate) {
+  // Switch the return value to failure.
+  problem.AddResidualBlock(
+      new ParameterIgnoringCostFunction<20, 3, 2, 3, 4>(false),
+      nullptr,
+      x,
+      y,
+      z);
+
+  // The values are ignored.
+  double state[9];
+
+  std::unique_ptr<Evaluator> evaluator(
+      CreateEvaluator(problem.mutable_program()));
+  std::unique_ptr<SparseMatrix> jacobian(evaluator->CreateJacobian());
+  double cost;
+  EXPECT_FALSE(evaluator->Evaluate(state, &cost, nullptr, nullptr, nullptr));
+}
+
+// In the pairs, the first argument is the linear solver type, and the second
+// argument is num_eliminate_blocks. Changing the num_eliminate_blocks only
+// makes sense for the schur-based solvers.
+//
+// Try all values of num_eliminate_blocks that make sense given that in the
+// tests a maximum of 4 parameter blocks are present.
+INSTANTIATE_TEST_CASE_P(
+    LinearSolvers,
+    EvaluatorTest,
+    ::testing::Values(EvaluatorTestOptions(DENSE_QR, 0),
+                      EvaluatorTestOptions(DENSE_SCHUR, 0),
+                      EvaluatorTestOptions(DENSE_SCHUR, 1),
+                      EvaluatorTestOptions(DENSE_SCHUR, 2),
+                      EvaluatorTestOptions(DENSE_SCHUR, 3),
+                      EvaluatorTestOptions(DENSE_SCHUR, 4),
+                      EvaluatorTestOptions(SPARSE_SCHUR, 0),
+                      EvaluatorTestOptions(SPARSE_SCHUR, 1),
+                      EvaluatorTestOptions(SPARSE_SCHUR, 2),
+                      EvaluatorTestOptions(SPARSE_SCHUR, 3),
+                      EvaluatorTestOptions(SPARSE_SCHUR, 4),
+                      EvaluatorTestOptions(ITERATIVE_SCHUR, 0),
+                      EvaluatorTestOptions(ITERATIVE_SCHUR, 1),
+                      EvaluatorTestOptions(ITERATIVE_SCHUR, 2),
+                      EvaluatorTestOptions(ITERATIVE_SCHUR, 3),
+                      EvaluatorTestOptions(ITERATIVE_SCHUR, 4),
+                      EvaluatorTestOptions(SPARSE_NORMAL_CHOLESKY, 0, false),
+                      EvaluatorTestOptions(SPARSE_NORMAL_CHOLESKY, 0, true)));
+
+// Simple cost function used to check if the evaluator is sensitive to
+// state changes.
+class ParameterSensitiveCostFunction : public SizedCostFunction<2, 2> {
+ public:
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** jacobians) const {
+    double x1 = parameters[0][0];
+    double x2 = parameters[0][1];
+    residuals[0] = x1 * x1;
+    residuals[1] = x2 * x2;
+
+    if (jacobians != nullptr) {
+      double* jacobian = jacobians[0];
+      if (jacobian != nullptr) {
+        jacobian[0] = 2.0 * x1;
+        jacobian[1] = 0.0;
+        jacobian[2] = 0.0;
+        jacobian[3] = 2.0 * x2;
+      }
+    }
+    return true;
+  }
+};
+
+TEST(Evaluator, EvaluatorRespectsParameterChanges) {
+  ProblemImpl problem;
+
+  double x[2];
+  x[0] = 1.0;
+  x[1] = 1.0;
+
+  problem.AddResidualBlock(new ParameterSensitiveCostFunction(), nullptr, x);
+  Program* program = problem.mutable_program();
+  program->SetParameterOffsetsAndIndex();
+
+  Evaluator::Options options;
+  options.linear_solver_type = DENSE_QR;
+  options.num_eliminate_blocks = 0;
+  options.context = problem.context();
+  string error;
+  std::unique_ptr<Evaluator> evaluator(
+      Evaluator::Create(options, program, &error));
+  std::unique_ptr<SparseMatrix> jacobian(evaluator->CreateJacobian());
+
+  ASSERT_EQ(2, jacobian->num_rows());
+  ASSERT_EQ(2, jacobian->num_cols());
+
+  double state[2];
+  state[0] = 2.0;
+  state[1] = 3.0;
+
+  // The original state of the residual block comes from the user's state,
+  // i.e. 1.0, 1.0. The only way the following checks can see results based on
+  // 2.0, 3.0 is if the evaluator respects the values in the state vector
+  // passed to Evaluate().
+
+  // Cost only; no residuals and no jacobian.
+  {
+    double cost = -1;
+    ASSERT_TRUE(evaluator->Evaluate(state, &cost, nullptr, nullptr, nullptr));
+    EXPECT_EQ(48.5, cost);
+  }
+
+  // Cost and residuals, no jacobian.
+  {
+    double cost = -1;
+    double residuals[2] = {-2, -2};
+    ASSERT_TRUE(evaluator->Evaluate(state, &cost, residuals, nullptr, nullptr));
+    EXPECT_EQ(48.5, cost);
+    EXPECT_EQ(4, residuals[0]);
+    EXPECT_EQ(9, residuals[1]);
+  }
+
+  // Cost, residuals, and jacobian.
+  {
+    double cost = -1;
+    double residuals[2] = {-2, -2};
+    SetSparseMatrixConstant(jacobian.get(), -1);
+    ASSERT_TRUE(
+        evaluator->Evaluate(state, &cost, residuals, nullptr, jacobian.get()));
+    EXPECT_EQ(48.5, cost);
+    EXPECT_EQ(4, residuals[0]);
+    EXPECT_EQ(9, residuals[1]);
+    Matrix actual_jacobian;
+    jacobian->ToDenseMatrix(&actual_jacobian);
+
+    Matrix expected_jacobian(2, 2);
+    expected_jacobian << 2 * state[0], 0, 0, 2 * state[1];
+
+    EXPECT_TRUE((actual_jacobian.array() == expected_jacobian.array()).all())
+        << "Actual:\n"
+        << actual_jacobian << "\nExpected:\n"
+        << expected_jacobian;
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/evaluator_test_utils.cc b/internal/ceres/evaluator_test_utils.cc
new file mode 100644
index 0000000..36dc21c
--- /dev/null
+++ b/internal/ceres/evaluator_test_utils.cc
@@ -0,0 +1,89 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//         sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/evaluator_test_utils.h"
+#include "ceres/internal/eigen.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+void CompareEvaluations(int expected_num_rows,
+                        int expected_num_cols,
+                        double expected_cost,
+                        const double* expected_residuals,
+                        const double* expected_gradient,
+                        const double* expected_jacobian,
+                        const double actual_cost,
+                        const double* actual_residuals,
+                        const double* actual_gradient,
+                        const double* actual_jacobian) {
+  EXPECT_EQ(expected_cost, actual_cost);
+
+  if (expected_residuals != NULL) {
+    ConstVectorRef expected_residuals_vector(expected_residuals,
+                                             expected_num_rows);
+    ConstVectorRef actual_residuals_vector(actual_residuals,
+                                           expected_num_rows);
+    EXPECT_TRUE((actual_residuals_vector.array() ==
+                 expected_residuals_vector.array()).all())
+        << "Actual:\n" << actual_residuals_vector
+        << "\nExpected:\n" << expected_residuals_vector;
+  }
+
+  if (expected_gradient != NULL) {
+    ConstVectorRef expected_gradient_vector(expected_gradient,
+                                            expected_num_cols);
+    ConstVectorRef actual_gradient_vector(actual_gradient,
+                                            expected_num_cols);
+
+    EXPECT_TRUE((actual_gradient_vector.array() ==
+                 expected_gradient_vector.array()).all())
+        << "Actual:\n" << actual_gradient_vector.transpose()
+        << "\nExpected:\n" << expected_gradient_vector.transpose();
+  }
+
+  if (expected_jacobian != NULL) {
+    ConstMatrixRef expected_jacobian_matrix(expected_jacobian,
+                                            expected_num_rows,
+                                            expected_num_cols);
+    ConstMatrixRef actual_jacobian_matrix(actual_jacobian,
+                                          expected_num_rows,
+                                          expected_num_cols);
+    EXPECT_TRUE((actual_jacobian_matrix.array() ==
+                 expected_jacobian_matrix.array()).all())
+        << "Actual:\n" << actual_jacobian_matrix
+        << "\nExpected:\n" << expected_jacobian_matrix;
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/evaluator_test_utils.h b/internal/ceres/evaluator_test_utils.h
new file mode 100644
index 0000000..7401f04
--- /dev/null
+++ b/internal/ceres/evaluator_test_utils.h
@@ -0,0 +1,60 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//         sameeragarwal@google.com (Sameer Agarwal)
+//
+// Test utils used for evaluation testing.
+
+namespace ceres {
+namespace internal {
+
+// Fixed-size struct for storing an evaluation.
+struct ExpectedEvaluation {
+  int num_rows;
+  int num_cols;
+  double cost;
+  const double residuals[50];
+  const double gradient[50];
+  const double jacobian[200];
+};
+
+// Compare two evaluations.
+void CompareEvaluations(int expected_num_rows,
+                        int expected_num_cols,
+                        double expected_cost,
+                        const double* expected_residuals,
+                        const double* expected_gradient,
+                        const double* expected_jacobian,
+                        const double actual_cost,
+                        const double* actual_residuals,
+                        const double* actual_gradient,
+                        const double* actual_jacobian);
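+
+// Usage sketch (illustrative; the values and the actual_* variables are
+// hypothetical outputs of whatever evaluator is under test):
+//
+//   ExpectedEvaluation expected = {
+//     2, 3,                        // num_rows, num_cols
+//     2.5,                         // cost
+//     { 1.0, 2.0 },                // residuals
+//     { 1.0, 2.0, 3.0 },           // gradient
+//     { 1.0, 0.0, 0.0,             // jacobian, row major
+//       0.0, 1.0, 0.0 }
+//   };
+//   CompareEvaluations(expected.num_rows, expected.num_cols, expected.cost,
+//                      expected.residuals, expected.gradient, expected.jacobian,
+//                      actual_cost, actual_residuals, actual_gradient,
+//                      actual_jacobian);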
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/execution_summary.h b/internal/ceres/execution_summary.h
new file mode 100644
index 0000000..17fd882
--- /dev/null
+++ b/internal/ceres/execution_summary.h
@@ -0,0 +1,88 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_EXECUTION_SUMMARY_H_
+#define CERES_INTERNAL_EXECUTION_SUMMARY_H_
+
+#include <map>
+#include <mutex>
+#include <string>
+
+#include "ceres/internal/port.h"
+#include "ceres/wall_time.h"
+
+namespace ceres {
+namespace internal {
+
+struct CallStatistics {
+  CallStatistics() : time(0.), calls(0) {}
+  double time;
+  int calls;
+};
+
+// Class used by various objects to report statistics about their
+// execution.
+class ExecutionSummary {
+ public:
+  void IncrementTimeBy(const std::string& name, const double value) {
+    std::lock_guard<std::mutex> l(mutex_);
+    CallStatistics& call_stats = statistics_[name];
+    call_stats.time += value;
+    ++call_stats.calls;
+  }
+
+  const std::map<std::string, CallStatistics>& statistics() const {
+    return statistics_;
+  }
+
+ private:
+  std::mutex mutex_;
+  std::map<std::string, CallStatistics> statistics_;
+};
+
+class ScopedExecutionTimer {
+ public:
+  ScopedExecutionTimer(const std::string& name, ExecutionSummary* summary)
+      : start_time_(WallTimeInSeconds()), name_(name), summary_(summary) {}
+
+  ~ScopedExecutionTimer() {
+    summary_->IncrementTimeBy(name_, WallTimeInSeconds() - start_time_);
+  }
+
+ private:
+  const double start_time_;
+  const std::string name_;
+  ExecutionSummary* summary_;
+};
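+
+// Usage sketch (illustrative only; "MyMinimizer" and its execution_summary_
+// member are hypothetical, not part of Ceres): a ScopedExecutionTimer adds the
+// wall time spent in the enclosing scope to the named entry of the summary
+// when it is destroyed.
+//
+//   class MyMinimizer {
+//    public:
+//     void Minimize() {
+//       ScopedExecutionTimer timer("MyMinimizer::Minimize", &execution_summary_);
+//       // ... work to be timed ...
+//     }
+//     const ExecutionSummary& execution_summary() const {
+//       return execution_summary_;
+//     }
+//    private:
+//     ExecutionSummary execution_summary_;
+//   };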
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_EXECUTION_SUMMARY_H_
diff --git a/internal/ceres/file.cc b/internal/ceres/file.cc
new file mode 100644
index 0000000..c95a44d
--- /dev/null
+++ b/internal/ceres/file.cc
@@ -0,0 +1,95 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// Really simple file IO.
+
+#include "ceres/file.h"
+
+#include <cstdio>
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+using std::string;
+
+void WriteStringToFileOrDie(const string &data, const string &filename) {
+  FILE* file_descriptor = fopen(filename.c_str(), "wb");
+  if (!file_descriptor) {
+    LOG(FATAL) << "Couldn't write to file: " << filename;
+  }
+  fwrite(data.c_str(), 1, data.size(), file_descriptor);
+  fclose(file_descriptor);
+}
+
+void ReadFileToStringOrDie(const string &filename, string *data) {
+  FILE* file_descriptor = fopen(filename.c_str(), "r");
+
+  if (!file_descriptor) {
+    LOG(FATAL) << "Couldn't read file: " << filename;
+  }
+
+  // Resize the input buffer appropriately.
+  fseek(file_descriptor, 0L, SEEK_END);
+  int num_bytes = ftell(file_descriptor);
+  data->resize(num_bytes);
+
+  // Read the data.
+  fseek(file_descriptor, 0L, SEEK_SET);
+  int num_read = fread(&((*data)[0]),
+                       sizeof((*data)[0]),
+                       num_bytes,
+                       file_descriptor);
+  if (num_read != num_bytes) {
+    LOG(FATAL) << "Couldn't read all of " << filename
+               << ", expected bytes: " << num_bytes * sizeof((*data)[0])
+               << ", actual bytes: " << num_read;
+  }
+  fclose(file_descriptor);
+}
+
+string JoinPath(const string& dirname, const string& basename) {
+#ifdef _WIN32
+    static const char separator = '\\';
+#else
+    static const char separator = '/';
+#endif  // _WIN32
+
+  if ((!basename.empty() && basename[0] == separator) || dirname.empty()) {
+    return basename;
+  } else if (dirname[dirname.size() - 1] == separator) {
+    return dirname + basename;
+  } else {
+    return dirname + string(&separator, 1) + basename;
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/file.h b/internal/ceres/file.h
new file mode 100644
index 0000000..219b459
--- /dev/null
+++ b/internal/ceres/file.h
@@ -0,0 +1,53 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// Simple file IO support. This is a portability shim.
+
+#ifndef CERES_INTERNAL_FILE_H_
+#define CERES_INTERNAL_FILE_H_
+
+#include <string>
+#include "ceres/internal/port.h"
+
+namespace ceres {
+namespace internal {
+
+void WriteStringToFileOrDie(const std::string &data,
+                            const std::string &filename);
+void ReadFileToStringOrDie(const std::string &filename, std::string *data);
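+
+// Usage sketch (illustrative; the paths are hypothetical):
+//
+//   std::string contents;
+//   ReadFileToStringOrDie("/tmp/input.txt", &contents);
+//   WriteStringToFileOrDie(contents, "/tmp/copy.txt");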
+
+// Join two path components, adding a slash if necessary.  If basename is an
+// absolute path then JoinPath ignores dirname and simply returns basename.
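+//
+// For example (illustrative, on a platform whose separator is '/'):
+// JoinPath("/foo", "bar") and JoinPath("/foo/", "bar") both return "/foo/bar",
+// while JoinPath("/foo", "/abs/bar") returns "/abs/bar".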
+std::string JoinPath(const std::string& dirname, const std::string& basename);
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_FILE_H_
diff --git a/internal/ceres/float_cxsparse.cc b/internal/ceres/float_cxsparse.cc
new file mode 100644
index 0000000..6c68830
--- /dev/null
+++ b/internal/ceres/float_cxsparse.cc
@@ -0,0 +1,47 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/float_cxsparse.h"
+
+#if !defined(CERES_NO_CXSPARSE)
+
+namespace ceres {
+namespace internal {
+
+std::unique_ptr<SparseCholesky> FloatCXSparseCholesky::Create(
+    OrderingType ordering_type) {
+  LOG(FATAL) << "FloatCXSparseCholesky is not available.";
+  return std::unique_ptr<SparseCholesky>();
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // !defined(CERES_NO_CXSPARSE)
diff --git a/internal/ceres/float_cxsparse.h b/internal/ceres/float_cxsparse.h
new file mode 100644
index 0000000..57fc5e4
--- /dev/null
+++ b/internal/ceres/float_cxsparse.h
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_FLOAT_CXSPARSE_H_
+#define CERES_INTERNAL_FLOAT_CXSPARSE_H_
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#if !defined(CERES_NO_CXSPARSE)
+
+#include <memory>
+#include "ceres/sparse_cholesky.h"
+
+namespace ceres {
+namespace internal {
+
+// Fake implementation of a single precision Sparse Cholesky using
+// CXSparse.
+class FloatCXSparseCholesky : public SparseCholesky {
+ public:
+  static std::unique_ptr<SparseCholesky> Create(
+      OrderingType ordering_type);
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // !defined(CERES_NO_CXSPARSE)
+
+#endif  // CERES_INTERNAL_FLOAT_CXSPARSE_H_
diff --git a/internal/ceres/float_suitesparse.cc b/internal/ceres/float_suitesparse.cc
new file mode 100644
index 0000000..0360457
--- /dev/null
+++ b/internal/ceres/float_suitesparse.cc
@@ -0,0 +1,47 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/float_suitesparse.h"
+
+#if !defined(CERES_NO_SUITESPARSE)
+
+namespace ceres {
+namespace internal {
+
+std::unique_ptr<SparseCholesky> FloatSuiteSparseCholesky::Create(
+    OrderingType ordering_type) {
+  LOG(FATAL) << "FloatSuiteSparseCholesky is not available.";
+  return std::unique_ptr<SparseCholesky>();
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // !defined(CERES_NO_SUITESPARSE)
diff --git a/internal/ceres/float_suitesparse.h b/internal/ceres/float_suitesparse.h
new file mode 100644
index 0000000..ac4d409
--- /dev/null
+++ b/internal/ceres/float_suitesparse.h
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_FLOAT_SUITESPARSE_H_
+#define CERES_INTERNAL_FLOAT_SUITESPARSE_H_
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#include <memory>
+#include "ceres/sparse_cholesky.h"
+
+#if !defined(CERES_NO_SUITESPARSE)
+
+namespace ceres {
+namespace internal {
+
+// Fake implementation of a single precision Sparse Cholesky using
+// SuiteSparse.
+class FloatSuiteSparseCholesky : public SparseCholesky {
+ public:
+  static std::unique_ptr<SparseCholesky> Create(
+      OrderingType ordering_type);
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // !defined(CERES_NO_SUITESPARSE)
+
+#endif  // CERES_INTERNAL_FLOAT_SUITESPARSE_H_
diff --git a/internal/ceres/function_sample.cc b/internal/ceres/function_sample.cc
new file mode 100644
index 0000000..2fd3dbd
--- /dev/null
+++ b/internal/ceres/function_sample.cc
@@ -0,0 +1,73 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/function_sample.h"
+#include "ceres/stringprintf.h"
+
+namespace ceres {
+namespace internal {
+
+FunctionSample::FunctionSample()
+    : x(0.0),
+      vector_x_is_valid(false),
+      value(0.0),
+      value_is_valid(false),
+      vector_gradient_is_valid(false),
+      gradient(0.0),
+      gradient_is_valid(false) {}
+
+FunctionSample::FunctionSample(const double x, const double value)
+    : x(x),
+      vector_x_is_valid(false),
+      value(value),
+      value_is_valid(true),
+      vector_gradient_is_valid(false),
+      gradient(0.0),
+      gradient_is_valid(false) {}
+
+FunctionSample::FunctionSample(const double x,
+                               const double value,
+                               const double gradient)
+    : x(x),
+      vector_x_is_valid(false),
+      value(value),
+      value_is_valid(true),
+      vector_gradient_is_valid(false),
+      gradient(gradient),
+      gradient_is_valid(true) {}
+
+std::string FunctionSample::ToDebugString() const {
+  return StringPrintf("[x: %.8e, value: %.8e, gradient: %.8e, "
+                      "value_is_valid: %d, gradient_is_valid: %d]",
+                      x, value, gradient, value_is_valid, gradient_is_valid);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/function_sample.h b/internal/ceres/function_sample.h
new file mode 100644
index 0000000..df79aef
--- /dev/null
+++ b/internal/ceres/function_sample.h
@@ -0,0 +1,94 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_FUNCTION_SAMPLE_H_
+#define CERES_INTERNAL_FUNCTION_SAMPLE_H_
+
+#include <string>
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+// FunctionSample is used by the line search routines to store and
+// communicate the value and (optionally) the gradient of the function
+// being minimized.
+//
+// Since line search, as the name implies, happens along a certain
+// line/direction, FunctionSample stores this information in two
+// ways: in the ambient space and along the direction of search.
+struct FunctionSample {
+  FunctionSample();
+  FunctionSample(double x, double value);
+  FunctionSample(double x, double value, double gradient);
+
+  std::string ToDebugString() const;
+
+  // x is the location of the sample along the search direction.
+  double x;
+
+  // Let p be a point and d be the search direction then
+  //
+  // vector_x = p + x * d;
+  Vector vector_x;
+  // True if vector_x has been assigned a valid value.
+  bool vector_x_is_valid;
+
+  // value = f(vector_x)
+  double value;
+  // True if the evaluation was successful and value is a finite
+  // number.
+  bool value_is_valid;
+
+  // vector_gradient = Df(vector_x);
+  //
+  // D is the derivative operator.
+  Vector vector_gradient;
+  // True if the vector gradient was evaluated and the evaluation was
+  // successful (the value is a finite number).
+  bool vector_gradient_is_valid;
+
+  // gradient = d.transpose() * vector_gradient
+  //
+  // where d is the search direction.
+  double gradient;
+  // True if the evaluation of the gradient was successful and the
+  // value is a finite number.
+  bool gradient_is_valid;
+};
+
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_FUNCTION_SAMPLE_H_
diff --git a/internal/ceres/generate_bundle_adjustment_tests.py b/internal/ceres/generate_bundle_adjustment_tests.py
new file mode 100644
index 0000000..a3469eb
--- /dev/null
+++ b/internal/ceres/generate_bundle_adjustment_tests.py
@@ -0,0 +1,266 @@
+# Ceres Solver - A fast non-linear least squares minimizer
+# Copyright 2018 Google Inc. All rights reserved.
+# http://ceres-solver.org/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+#   this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+# * Neither the name of Google Inc. nor the names of its contributors may be
+#   used to endorse or promote products derived from this software without
+#   specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# Author: keir@google.com (Keir Mierle)
+#
+# Generate bundle adjustment tests as separate binaries. Since the bundle
+# adjustment tests are fairly processing intensive, serializing them makes the
+# tests take forever to run. Splitting them into separate binaries makes it
+# easier to parallelize in continuous integration systems, and makes local
+# processing on multi-core workstations much faster.
+
+# Product of ORDERINGS, THREAD_CONFIGS, and SOLVER_CONFIGS is the full set of
+# tests to generate.
+ORDERINGS = ["kAutomaticOrdering", "kUserOrdering"]
+SINGLE_THREADED = "1"
+MULTI_THREADED = "4"
+THREAD_CONFIGS = [SINGLE_THREADED, MULTI_THREADED]
+
+SOLVER_CONFIGS = [
+  # Linear solver            Sparse backend      Preconditioner
+  ('DENSE_SCHUR',            'NO_SPARSE',        'IDENTITY'),
+  ('ITERATIVE_SCHUR',        'NO_SPARSE',        'JACOBI'),
+  ('ITERATIVE_SCHUR',        'NO_SPARSE',        'SCHUR_JACOBI'),
+  ('ITERATIVE_SCHUR',        'SUITE_SPARSE',     'CLUSTER_JACOBI'),
+  ('ITERATIVE_SCHUR',        'EIGEN_SPARSE',     'CLUSTER_JACOBI'),
+  ('ITERATIVE_SCHUR',        'CX_SPARSE',        'CLUSTER_JACOBI'),
+  ('ITERATIVE_SCHUR',        'ACCELERATE_SPARSE','CLUSTER_JACOBI'),
+  ('ITERATIVE_SCHUR',        'SUITE_SPARSE',     'CLUSTER_TRIDIAGONAL'),
+  ('ITERATIVE_SCHUR',        'EIGEN_SPARSE',     'CLUSTER_TRIDIAGONAL'),
+  ('ITERATIVE_SCHUR',        'CX_SPARSE',        'CLUSTER_TRIDIAGONAL'),
+  ('ITERATIVE_SCHUR',        'ACCELERATE_SPARSE','CLUSTER_TRIDIAGONAL'),
+  ('SPARSE_NORMAL_CHOLESKY', 'SUITE_SPARSE',     'IDENTITY'),
+  ('SPARSE_NORMAL_CHOLESKY', 'EIGEN_SPARSE',     'IDENTITY'),
+  ('SPARSE_NORMAL_CHOLESKY', 'CX_SPARSE',        'IDENTITY'),
+  ('SPARSE_NORMAL_CHOLESKY', 'ACCELERATE_SPARSE','IDENTITY'),
+  ('SPARSE_SCHUR',           'SUITE_SPARSE',     'IDENTITY'),
+  ('SPARSE_SCHUR',           'EIGEN_SPARSE',     'IDENTITY'),
+  ('SPARSE_SCHUR',           'CX_SPARSE',        'IDENTITY'),
+  ('SPARSE_SCHUR',           'ACCELERATE_SPARSE','IDENTITY'),
+]
+
+FILENAME_SHORTENING_MAP = dict(
+  DENSE_SCHUR='denseschur',
+  ITERATIVE_SCHUR='iterschur',
+  SPARSE_NORMAL_CHOLESKY='sparsecholesky',
+  SPARSE_SCHUR='sparseschur',
+  NO_SPARSE='',  # Omit sparse reference entirely for dense tests.
+  SUITE_SPARSE='suitesparse',
+  EIGEN_SPARSE='eigensparse',
+  CX_SPARSE='cxsparse',
+  ACCELERATE_SPARSE='acceleratesparse',
+  IDENTITY='identity',
+  JACOBI='jacobi',
+  SCHUR_JACOBI='schurjacobi',
+  CLUSTER_JACOBI='clustjacobi',
+  CLUSTER_TRIDIAGONAL='clusttri',
+  kAutomaticOrdering='auto',
+  kUserOrdering='user',
+)
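+
+# For example (illustrative, derived from the tables above): the configuration
+# ('ITERATIVE_SCHUR', 'SUITE_SPARSE', 'CLUSTER_JACOBI') with kUserOrdering and
+# 4 threads produces ba_iterschur_suitesparse_clustjacobi_user_threads_test.cc
+# (under generated_bundle_adjustment_tests/) containing the test
+# IterativeSchur_SuiteSparse_ClusterJacobi_UserOrdering_Threads.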
+
+COPYRIGHT_HEADER = (
+"""// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.""")
+
+BUNDLE_ADJUSTMENT_TEST_TEMPLATE = (COPYRIGHT_HEADER + """
+
+#include "bundle_adjustment_test_util.h"
+%(preprocessor_conditions_begin)s
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       %(test_class_name)s) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = %(num_threads)s;
+   options->linear_solver_type = %(linear_solver)s;
+   options->sparse_linear_algebra_library_type = %(sparse_backend)s;
+   options->preconditioner_type = %(preconditioner)s;
+   if (%(ordering)s) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+%(preprocessor_conditions_end)s
+""")
+
+def camelcasify(token):
+  """Convert capitalized underscore tokens to camel case"""
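+  # For example (illustrative): camelcasify('ITERATIVE_SCHUR') == 'IterativeSchur'
+  # and camelcasify('SUITE_SPARSE') == 'SuiteSparse'.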
+  return ''.join([x.lower().capitalize() for x in token.split('_')])
+
+
+def generate_bundle_test(linear_solver,
+                         sparse_backend,
+                         preconditioner,
+                         ordering,
+                         thread_config):
+  """Generate a bundle adjustment test executable configured appropriately"""
+
+  # Preconditioner only makes sense for iterative schur; drop it otherwise.
+  preconditioner_tag = preconditioner
+  if linear_solver != 'ITERATIVE_SCHUR':
+    preconditioner_tag = ''
+
+  # Omit references to the sparse backend when one is not in use.
+  sparse_backend_tag = sparse_backend
+  if sparse_backend == 'NO_SPARSE':
+    sparse_backend_tag = ''
+
+  # Use a double underscore; otherwise the names are harder to understand.
+  test_class_name = '_'.join(filter(lambda x: x, [
+      camelcasify(linear_solver),
+      camelcasify(sparse_backend_tag),
+      camelcasify(preconditioner_tag),
+      ordering[1:],  # Strip 'k'
+      'Threads' if thread_config == MULTI_THREADED else '']))
+
+  # Initial template parameters (augmented more below).
+  template_parameters = dict(
+          linear_solver=linear_solver,
+          sparse_backend=sparse_backend,
+          preconditioner=preconditioner,
+          ordering=ordering,
+          num_threads=thread_config,
+          test_class_name=test_class_name)
+
+  # Accumulate appropriate #ifdef/#ifndefs for the solver's sparse backend.
+  preprocessor_conditions_begin = []
+  preprocessor_conditions_end = []
+  if sparse_backend == 'SUITE_SPARSE':
+    preprocessor_conditions_begin.append('#ifndef CERES_NO_SUITESPARSE')
+    preprocessor_conditions_end.insert(0, '#endif  // CERES_NO_SUITESPARSE')
+  elif sparse_backend == 'CX_SPARSE':
+    preprocessor_conditions_begin.append('#ifndef CERES_NO_CXSPARSE')
+    preprocessor_conditions_end.insert(0, '#endif  // CERES_NO_CXSPARSE')
+  elif sparse_backend == 'ACCELERATE_SPARSE':
+    preprocessor_conditions_begin.append('#ifndef CERES_NO_ACCELERATE_SPARSE')
+    preprocessor_conditions_end.insert(0, '#endif  // CERES_NO_ACCELERATE_SPARSE')
+  elif sparse_backend == 'EIGEN_SPARSE':
+    preprocessor_conditions_begin.append('#ifdef CERES_USE_EIGEN_SPARSE')
+    preprocessor_conditions_end.insert(0, '#endif  // CERES_USE_EIGEN_SPARSE')
+
+  # Accumulate appropriate #ifdef/#ifndefs for threading conditions.
+  if thread_config == MULTI_THREADED:
+    preprocessor_conditions_begin.append('#ifndef CERES_NO_THREADS')
+    preprocessor_conditions_end.insert(0, '#endif  // CERES_NO_THREADS')
+
+  # If there are #ifdefs, put newlines around them.
+  if preprocessor_conditions_begin:
+    preprocessor_conditions_begin.insert(0, '')
+    preprocessor_conditions_begin.append('')
+    preprocessor_conditions_end.insert(0, '')
+    preprocessor_conditions_end.append('')
+
+  # Put #ifdef/#ifndef stacks into the template parameters.
+  template_parameters['preprocessor_conditions_begin'] = '\n'.join(
+      preprocessor_conditions_begin)
+  template_parameters['preprocessor_conditions_end'] = '\n'.join(
+      preprocessor_conditions_end)
+
+  # Substitute variables into the test template, and write the result to a file.
+  filename_tag = '_'.join(FILENAME_SHORTENING_MAP.get(x) for x in [
+      linear_solver,
+      sparse_backend_tag,
+      preconditioner_tag,
+      ordering]
+      if FILENAME_SHORTENING_MAP.get(x))
+  if (thread_config == MULTI_THREADED):
+    filename_tag += '_threads'
+
+  filename = ('generated_bundle_adjustment_tests/ba_%s_test.cc' %
+                filename_tag.lower())
+  with open(filename, 'w') as fd:
+    fd.write(BUNDLE_ADJUSTMENT_TEST_TEMPLATE % template_parameters)
+
+  # All done.
+  print 'Generated', filename
+
+  return filename
+
+
+if __name__ == '__main__':
+  # Iterate over all the possible configurations and generate the tests.
+  generated_files = []
+  for linear_solver, sparse_backend, preconditioner in SOLVER_CONFIGS:
+    for ordering in ORDERINGS:
+      for thread_config in THREAD_CONFIGS:
+        generated_files.append(
+            generate_bundle_test(linear_solver,
+                                 sparse_backend,
+                                 preconditioner,
+                                 ordering,
+                                 thread_config))
+
+  # Generate the CMakeLists.txt as well.
+  with open('generated_bundle_adjustment_tests/CMakeLists.txt', 'w') as fd:
+    fd.write(COPYRIGHT_HEADER.replace('//', '#').replace('http:#', 'http://'))
+    fd.write('\n')
+    fd.write('\n')
+    for generated_file in generated_files:
+      fd.write('ceres_test(%s)\n' %
+               generated_file.split('/')[1].replace('_test.cc', ''))
diff --git a/internal/ceres/generate_template_specializations.py b/internal/ceres/generate_template_specializations.py
new file mode 100644
index 0000000..75c1464
--- /dev/null
+++ b/internal/ceres/generate_template_specializations.py
@@ -0,0 +1,245 @@
+# Ceres Solver - A fast non-linear least squares minimizer
+# Copyright 2015 Google Inc. All rights reserved.
+# http://ceres-solver.org/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+#   this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+# * Neither the name of Google Inc. nor the names of its contributors may be
+#   used to endorse or promote products derived from this software without
+#   specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# Author: sameeragarwal@google.com (Sameer Agarwal)
+#
+# Script for explicitly generating template specialization of the
+# SchurEliminator class. It is a rather large class
+# and the number of explicit instantiations is also large. Explicitly
+# generating these instantiations in separate .cc files breaks the
+# compilation into separate compilation units rather than one large .cc
+# file, which takes 2+GB of RAM to compile.
+#
+# This script creates three sets of files.
+#
+# 1. schur_eliminator_x_x_x.cc and partitioned_matrix_view_x_x_x.cc
+# where, the x indicates the template parameters and
+#
+# 2. schur_eliminator.cc & partitioned_matrix_view.cc
+#
+# that contains a factory function for instantiating these classes
+# based on runtime parameters.
+#
+# 3. schur_templates.cc
+#
+# that contains a function which can be queried to determine what
+# template specializations are available.
+#
+# The following list of tuples, specializations indicates the set of
+# specializations that is generated.
+SPECIALIZATIONS = [(2, 2, 2),
+                   (2, 2, 3),
+                   (2, 2, 4),
+                   (2, 2, "Eigen::Dynamic"),
+                   (2, 3, 3),
+                   (2, 3, 4),
+                   (2, 3, 6),
+                   (2, 3, 9),
+                   (2, 3, "Eigen::Dynamic"),
+                   (2, 4, 3),
+                   (2, 4, 4),
+                   (2, 4, 6),
+                   (2, 4, 8),
+                   (2, 4, 9),
+                   (2, 4, "Eigen::Dynamic"),
+                   (2, "Eigen::Dynamic", "Eigen::Dynamic"),
+                   (4, 4, 2),
+                   (4, 4, 3),
+                   (4, 4, 4),
+                   (4, 4, "Eigen::Dynamic")]
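+
+# For example (illustrative): the tuple (2, 3, 4) yields
+# generated/schur_eliminator_2_3_4.cc and
+# generated/partitioned_matrix_view_2_3_4.cc, while "Eigen::Dynamic" sizes are
+# shortened to "d", so (2, 2, "Eigen::Dynamic") yields
+# generated/schur_eliminator_2_2_d.cc.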
+
+import schur_eliminator_template
+import partitioned_matrix_view_template
+import os
+import glob
+
+def SuffixForSize(size):
+  if size == "Eigen::Dynamic":
+    return "d"
+  return str(size)
+
+def SpecializationFilename(prefix, row_block_size, e_block_size, f_block_size):
+  return "_".join([prefix] + map(SuffixForSize, (row_block_size,
+                                                 e_block_size,
+                                                 f_block_size)))
+
+def GenerateFactoryConditional(row_block_size, e_block_size, f_block_size):
+  conditionals = []
+  if (row_block_size != "Eigen::Dynamic"):
+    conditionals.append("(options.row_block_size == %s)" % row_block_size)
+  if (e_block_size != "Eigen::Dynamic"):
+    conditionals.append("(options.e_block_size == %s)" % e_block_size)
+  if (f_block_size != "Eigen::Dynamic"):
+    conditionals.append("(options.f_block_size == %s)" % f_block_size)
+  if (len(conditionals) == 0):
+    return "%s"
+
+  if (len(conditionals) == 1):
+    return " if " + conditionals[0] + "{\n  %s\n }\n"
+
+  return " if (" + " &&\n     ".join(conditionals) + ") {\n  %s\n }\n"
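+
+# For example (illustrative): GenerateFactoryConditional(2, 3, "Eigen::Dynamic")
+# returns (modulo exact whitespace)
+#   " if ((options.row_block_size == 2) && (options.e_block_size == 3)) { %s }"
+# into which the factory code for that specialization is later substituted.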
+
+def Specialize(name, data):
+  """
+  Generate specialization code and the conditionals to instantiate it.
+  """
+
+  # Specialization files
+  for row_block_size, e_block_size, f_block_size in SPECIALIZATIONS:
+      output = SpecializationFilename("generated/" + name,
+                                      row_block_size,
+                                      e_block_size,
+                                      f_block_size) + ".cc"
+
+      with open(output, "w") as f:
+        f.write(data["HEADER"])
+        f.write(data["SPECIALIZATION_FILE"] %
+                  (row_block_size, e_block_size, f_block_size))
+
+  # Generate the _d_d_d specialization.
+  output = SpecializationFilename("generated/" + name,
+                                   "Eigen::Dynamic",
+                                   "Eigen::Dynamic",
+                                   "Eigen::Dynamic") + ".cc"
+  with open(output, "w") as f:
+    f.write(data["HEADER"])
+    f.write(data["DYNAMIC_FILE"] %
+              ("Eigen::Dynamic", "Eigen::Dynamic", "Eigen::Dynamic"))
+
+  # Factory
+  with open(name + ".cc", "w") as f:
+    f.write(data["HEADER"])
+    f.write(data["FACTORY_FILE_HEADER"])
+    for row_block_size, e_block_size, f_block_size in SPECIALIZATIONS:
+        factory_conditional = GenerateFactoryConditional(
+            row_block_size, e_block_size, f_block_size)
+        factory = data["FACTORY"] % (row_block_size, e_block_size, f_block_size)
+        f.write(factory_conditional % factory);
+    f.write(data["FACTORY_FOOTER"])
+
+QUERY_HEADER = """// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// What template specializations are available.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+"""
+
+QUERY_FILE_HEADER = """
+#include "ceres/internal/eigen.h"
+#include "ceres/schur_templates.h"
+
+namespace ceres {
+namespace internal {
+
+void GetBestSchurTemplateSpecialization(int* row_block_size,
+                                        int* e_block_size,
+                                        int* f_block_size) {
+  LinearSolver::Options options;
+  options.row_block_size = *row_block_size;
+  options.e_block_size = *e_block_size;
+  options.f_block_size = *f_block_size;
+  *row_block_size = Eigen::Dynamic;
+  *e_block_size = Eigen::Dynamic;
+  *f_block_size = Eigen::Dynamic;
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+"""
+
+QUERY_FOOTER = """
+#endif
+  return;
+}
+
+}  // namespace internal
+}  // namespace ceres
+"""
+
+QUERY_ACTION = """ *row_block_size = %s;
+   *e_block_size = %s;
+   *f_block_size = %s;
+  return;"""
+
+def GenerateQueryFile():
+  """
+  Generate file that allows querying for available template specializations.
+  """
+
+  with open("schur_templates.cc", "w") as f:
+    f.write(QUERY_HEADER)
+    f.write(QUERY_FILE_HEADER)
+    for row_block_size, e_block_size, f_block_size in SPECIALIZATIONS:
+      factory_conditional = GenerateFactoryConditional(
+        row_block_size, e_block_size, f_block_size)
+      action = QUERY_ACTION % (row_block_size, e_block_size, f_block_size)
+      f.write(factory_conditional % action)
+    f.write(QUERY_FOOTER)
+
+
+if __name__ == "__main__":
+  for f in glob.glob("generated/*"):
+    os.remove(f)
+
+  Specialize("schur_eliminator",
+               schur_eliminator_template.__dict__)
+  Specialize("partitioned_matrix_view",
+               partitioned_matrix_view_template.__dict__)
+  GenerateQueryFile()
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_2_2.cc b/internal/ceres/generated/partitioned_matrix_view_2_2_2.cc
new file mode 100644
index 0000000..86ad17b
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_2_2.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 2, 2>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_2_3.cc b/internal/ceres/generated/partitioned_matrix_view_2_2_3.cc
new file mode 100644
index 0000000..33018d5
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_2_3.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 2, 3>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_2_4.cc b/internal/ceres/generated/partitioned_matrix_view_2_2_4.cc
new file mode 100644
index 0000000..a429a54
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_2_4.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 2, 4>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_2_d.cc b/internal/ceres/generated/partitioned_matrix_view_2_2_d.cc
new file mode 100644
index 0000000..f6f03ea
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_2_d.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 2, Eigen::Dynamic>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_3_3.cc b/internal/ceres/generated/partitioned_matrix_view_2_3_3.cc
new file mode 100644
index 0000000..0b73e1a
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_3_3.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 3, 3>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_3_4.cc b/internal/ceres/generated/partitioned_matrix_view_2_3_4.cc
new file mode 100644
index 0000000..bc4a861
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_3_4.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 3, 4>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_3_6.cc b/internal/ceres/generated/partitioned_matrix_view_2_3_6.cc
new file mode 100644
index 0000000..fe8f7dd
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_3_6.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 3, 6>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_3_9.cc b/internal/ceres/generated/partitioned_matrix_view_2_3_9.cc
new file mode 100644
index 0000000..ac493fc
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_3_9.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 3, 9>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_3_d.cc b/internal/ceres/generated/partitioned_matrix_view_2_3_d.cc
new file mode 100644
index 0000000..e29efaf
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_3_d.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 3, Eigen::Dynamic>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_4_3.cc b/internal/ceres/generated/partitioned_matrix_view_2_4_3.cc
new file mode 100644
index 0000000..e61e0a3
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_4_3.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 4, 3>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_4_4.cc b/internal/ceres/generated/partitioned_matrix_view_2_4_4.cc
new file mode 100644
index 0000000..2e1170d
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_4_4.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 4, 4>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_4_6.cc b/internal/ceres/generated/partitioned_matrix_view_2_4_6.cc
new file mode 100644
index 0000000..4a5590d
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_4_6.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 4, 6>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_4_8.cc b/internal/ceres/generated/partitioned_matrix_view_2_4_8.cc
new file mode 100644
index 0000000..83015f1
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_4_8.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 4, 8>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_4_9.cc b/internal/ceres/generated/partitioned_matrix_view_2_4_9.cc
new file mode 100644
index 0000000..25671f9
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_4_9.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 4, 9>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_4_d.cc b/internal/ceres/generated/partitioned_matrix_view_2_4_d.cc
new file mode 100644
index 0000000..d259802
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_4_d.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 4, Eigen::Dynamic>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_d_d.cc b/internal/ceres/generated/partitioned_matrix_view_2_d_d.cc
new file mode 100644
index 0000000..c956759
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_d_d.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, Eigen::Dynamic, Eigen::Dynamic>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_4_4_2.cc b/internal/ceres/generated/partitioned_matrix_view_4_4_2.cc
new file mode 100644
index 0000000..f08049c
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_4_4_2.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<4, 4, 2>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_4_4_3.cc b/internal/ceres/generated/partitioned_matrix_view_4_4_3.cc
new file mode 100644
index 0000000..9342612
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_4_4_3.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<4, 4, 3>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_4_4_4.cc b/internal/ceres/generated/partitioned_matrix_view_4_4_4.cc
new file mode 100644
index 0000000..8b273fa
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_4_4_4.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<4, 4, 4>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_4_4_d.cc b/internal/ceres/generated/partitioned_matrix_view_4_4_d.cc
new file mode 100644
index 0000000..e8b45e4
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_4_4_d.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<4, 4, Eigen::Dynamic>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_d_d_d.cc b/internal/ceres/generated/partitioned_matrix_view_d_d_d.cc
new file mode 100644
index 0000000..3545b86
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_d_d_d.cc
@@ -0,0 +1,52 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic>;
+
+}  // namespace internal
+}  // namespace ceres
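
Unlike the fixed-size files, partitioned_matrix_view_d_d_d.cc above carries no CERES_RESTRICT_SCHUR_SPECIALIZATION guard: the fully dynamic instantiation is the one that must always exist, serving as the fallback when the runtime block sizes do not match a generated triple or when the fixed-size specializations are compiled out. The sketch below illustrates that dispatch with a hypothetical Create() factory and BlockViewImpl class; it is not Ceres's actual selection code, only an illustration of the idea.

// dynamic_fallback_sketch.cc -- hypothetical factory, not Ceres code.
#include <memory>

constexpr int kDynamic = -1;  // Stand-in for Eigen::Dynamic.

class BlockViewBase {
 public:
  virtual ~BlockViewBase() = default;
};

template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
class BlockViewImpl : public BlockViewBase {};

std::unique_ptr<BlockViewBase> Create(int row_block_size, int e_block_size,
                                      int f_block_size) {
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
  // Pick a fixed-size instantiation when the runtime block sizes match one
  // of the generated triples (only two of them shown here).
  if (row_block_size == 2 && e_block_size == 2 && f_block_size == 2) {
    return std::make_unique<BlockViewImpl<2, 2, 2>>();
  }
  if (row_block_size == 2 && e_block_size == 3 && f_block_size == 4) {
    return std::make_unique<BlockViewImpl<2, 3, 4>>();
  }
#endif
  // Fallback for every other combination, and the only path available when
  // CERES_RESTRICT_SCHUR_SPECIALIZATION is defined.
  return std::make_unique<BlockViewImpl<kDynamic, kDynamic, kDynamic>>();
}

int main() {
  std::unique_ptr<BlockViewBase> view = Create(2, 3, 4);
  return view != nullptr ? 0 : 1;
}
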
diff --git a/internal/ceres/generated/schur_eliminator_2_2_2.cc b/internal/ceres/generated/schur_eliminator_2_2_2.cc
new file mode 100644
index 0000000..79fcf43
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_2_2_2.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<2, 2, 2>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
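The include-order comment above is not decorative: ceres/internal/port.h is where Ceres compile options such as CERES_RESTRICT_SCHUR_SPECIALIZATION become visible as preprocessor macros, and an #ifndef evaluated before that header has been seen would silently treat the option as unset. A hypothetical, single-file sketch of the same guard pattern (the macro and type names here are stand-ins, not the real Ceres ones):

// In a real build the macro would come from a configuration header or a -D
// compiler flag; it only affects the #ifndef below if it is defined before
// that check is reached.
// #define RESTRICT_SPECIALIZATIONS  // uncomment to compile out fixed sizes

template <int kEBlockSize, int kFBlockSize>
struct Eliminator {
  int Eliminate() const { return kEBlockSize * 10 + kFBlockSize; }
};

#ifndef RESTRICT_SPECIALIZATIONS
// Emit code for this size combination in this translation unit -- the role
// played by "template class SchurEliminator<2, 2, 2>;" above.
template struct Eliminator<2, 2>;
#endif  // RESTRICT_SPECIALIZATIONS

int main() { return Eliminator<2, 2>().Eliminate(); }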
diff --git a/internal/ceres/generated/schur_eliminator_2_2_3.cc b/internal/ceres/generated/schur_eliminator_2_2_3.cc
new file mode 100644
index 0000000..edd7fb6
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_2_2_3.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<2, 2, 3>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/schur_eliminator_2_2_4.cc b/internal/ceres/generated/schur_eliminator_2_2_4.cc
new file mode 100644
index 0000000..692267d
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_2_2_4.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<2, 2, 4>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/schur_eliminator_2_2_d.cc b/internal/ceres/generated/schur_eliminator_2_2_d.cc
new file mode 100644
index 0000000..33d9c6d
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_2_2_d.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<2, 2, Eigen::Dynamic>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/schur_eliminator_2_3_3.cc b/internal/ceres/generated/schur_eliminator_2_3_3.cc
new file mode 100644
index 0000000..4a5e2fe
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_2_3_3.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<2, 3, 3>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/schur_eliminator_2_3_4.cc b/internal/ceres/generated/schur_eliminator_2_3_4.cc
new file mode 100644
index 0000000..7ee63d0
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_2_3_4.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<2, 3, 4>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/schur_eliminator_2_3_6.cc b/internal/ceres/generated/schur_eliminator_2_3_6.cc
new file mode 100644
index 0000000..108760e
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_2_3_6.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<2, 3, 6>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/schur_eliminator_2_3_9.cc b/internal/ceres/generated/schur_eliminator_2_3_9.cc
new file mode 100644
index 0000000..4fea2fa
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_2_3_9.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<2, 3, 9>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/schur_eliminator_2_3_d.cc b/internal/ceres/generated/schur_eliminator_2_3_d.cc
new file mode 100644
index 0000000..0d13c99
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_2_3_d.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<2, 3, Eigen::Dynamic>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/schur_eliminator_2_4_3.cc b/internal/ceres/generated/schur_eliminator_2_4_3.cc
new file mode 100644
index 0000000..3827c65
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_2_4_3.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<2, 4, 3>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/schur_eliminator_2_4_4.cc b/internal/ceres/generated/schur_eliminator_2_4_4.cc
new file mode 100644
index 0000000..47bdfab
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_2_4_4.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<2, 4, 4>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/schur_eliminator_2_4_6.cc b/internal/ceres/generated/schur_eliminator_2_4_6.cc
new file mode 100644
index 0000000..3777be2
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_2_4_6.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<2, 4, 6>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/schur_eliminator_2_4_8.cc b/internal/ceres/generated/schur_eliminator_2_4_8.cc
new file mode 100644
index 0000000..862c76a
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_2_4_8.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<2, 4, 8>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/schur_eliminator_2_4_9.cc b/internal/ceres/generated/schur_eliminator_2_4_9.cc
new file mode 100644
index 0000000..5b5b7cc
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_2_4_9.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<2, 4, 9>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/schur_eliminator_2_4_d.cc b/internal/ceres/generated/schur_eliminator_2_4_d.cc
new file mode 100644
index 0000000..ce2d450
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_2_4_d.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<2, 4, Eigen::Dynamic>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/schur_eliminator_2_d_d.cc b/internal/ceres/generated/schur_eliminator_2_d_d.cc
new file mode 100644
index 0000000..9b02bd9
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_2_d_d.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<2, Eigen::Dynamic, Eigen::Dynamic>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/schur_eliminator_4_4_2.cc b/internal/ceres/generated/schur_eliminator_4_4_2.cc
new file mode 100644
index 0000000..10f709d
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_4_4_2.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<4, 4, 2>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/schur_eliminator_4_4_3.cc b/internal/ceres/generated/schur_eliminator_4_4_3.cc
new file mode 100644
index 0000000..bcbcc74
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_4_4_3.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<4, 4, 3>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/schur_eliminator_4_4_4.cc b/internal/ceres/generated/schur_eliminator_4_4_4.cc
new file mode 100644
index 0000000..44ecc87
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_4_4_4.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<4, 4, 4>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/schur_eliminator_4_4_d.cc b/internal/ceres/generated/schur_eliminator_4_4_d.cc
new file mode 100644
index 0000000..69c8563
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_4_4_d.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<4, 4, Eigen::Dynamic>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/schur_eliminator_d_d_d.cc b/internal/ceres/generated/schur_eliminator_d_d_d.cc
new file mode 100644
index 0000000..348708b
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_d_d_d.cc
@@ -0,0 +1,52 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic>;
+
+}  // namespace internal
+}  // namespace ceres
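Unlike the fixed-size files above, this fully dynamic specialization is not wrapped in the CERES_RESTRICT_SCHUR_SPECIALIZATION guard: it is the one instantiation that must always be available, since it is what a solver falls back to when the problem's block sizes do not match any precompiled combination. A hedged, hypothetical sketch of that kind of size-based dispatch (the factory and names below are illustrative only, not Ceres's actual API):

#include <memory>

constexpr int kDynamic = -1;  // plays the role of Eigen::Dynamic

struct EliminatorBase {
  virtual ~EliminatorBase() = default;
};

template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
struct Eliminator : EliminatorBase {
  // Fixed-size kernels when the parameters are compile-time constants,
  // runtime-sized loops when they are kDynamic.
};

std::unique_ptr<EliminatorBase> CreateEliminator(int row, int e, int f) {
  // Prefer a precompiled fixed-size combination when one matches.
  if (row == 2 && e == 2 && f == 2) {
    return std::make_unique<Eliminator<2, 2, 2>>();
  }
  if (row == 2 && e == 3 && f == 4) {
    return std::make_unique<Eliminator<2, 3, 4>>();
  }
  // ... one branch per generated file ...
  // Otherwise fall back to the fully dynamic instantiation, which is why it
  // is never compiled out.
  return std::make_unique<Eliminator<kDynamic, kDynamic, kDynamic>>();
}

int main() {
  auto eliminator = CreateEliminator(2, 5, 7);  // no fixed match -> dynamic
  return eliminator == nullptr;
}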
diff --git a/internal/ceres/generated_bundle_adjustment_tests/CMakeLists.txt b/internal/ceres/generated_bundle_adjustment_tests/CMakeLists.txt
new file mode 100644
index 0000000..db2d233
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/CMakeLists.txt
@@ -0,0 +1,113 @@
+# Ceres Solver - A fast non-linear least squares minimizer
+# Copyright 2018 Google Inc. All rights reserved.
+# http://ceres-solver.org/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+#   this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+# * Neither the name of Google Inc. nor the names of its contributors may be
+#   used to endorse or promote products derived from this software without
+#   specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# ========================================
+# THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+# THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+# THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+# THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+# ========================================
+#
+# This file is generated using generate_bundle_adjustment_tests.py.
+
+ceres_test(ba_denseschur_auto)
+ceres_test(ba_denseschur_auto_threads)
+ceres_test(ba_denseschur_user)
+ceres_test(ba_denseschur_user_threads)
+ceres_test(ba_iterschur_jacobi_auto)
+ceres_test(ba_iterschur_jacobi_auto_threads)
+ceres_test(ba_iterschur_jacobi_user)
+ceres_test(ba_iterschur_jacobi_user_threads)
+ceres_test(ba_iterschur_schurjacobi_auto)
+ceres_test(ba_iterschur_schurjacobi_auto_threads)
+ceres_test(ba_iterschur_schurjacobi_user)
+ceres_test(ba_iterschur_schurjacobi_user_threads)
+ceres_test(ba_iterschur_suitesparse_clustjacobi_auto)
+ceres_test(ba_iterschur_suitesparse_clustjacobi_auto_threads)
+ceres_test(ba_iterschur_suitesparse_clustjacobi_user)
+ceres_test(ba_iterschur_suitesparse_clustjacobi_user_threads)
+ceres_test(ba_iterschur_eigensparse_clustjacobi_auto)
+ceres_test(ba_iterschur_eigensparse_clustjacobi_auto_threads)
+ceres_test(ba_iterschur_eigensparse_clustjacobi_user)
+ceres_test(ba_iterschur_eigensparse_clustjacobi_user_threads)
+ceres_test(ba_iterschur_cxsparse_clustjacobi_auto)
+ceres_test(ba_iterschur_cxsparse_clustjacobi_auto_threads)
+ceres_test(ba_iterschur_cxsparse_clustjacobi_user)
+ceres_test(ba_iterschur_cxsparse_clustjacobi_user_threads)
+ceres_test(ba_iterschur_acceleratesparse_clustjacobi_auto)
+ceres_test(ba_iterschur_acceleratesparse_clustjacobi_auto_threads)
+ceres_test(ba_iterschur_acceleratesparse_clustjacobi_user)
+ceres_test(ba_iterschur_acceleratesparse_clustjacobi_user_threads)
+ceres_test(ba_iterschur_suitesparse_clusttri_auto)
+ceres_test(ba_iterschur_suitesparse_clusttri_auto_threads)
+ceres_test(ba_iterschur_suitesparse_clusttri_user)
+ceres_test(ba_iterschur_suitesparse_clusttri_user_threads)
+ceres_test(ba_iterschur_eigensparse_clusttri_auto)
+ceres_test(ba_iterschur_eigensparse_clusttri_auto_threads)
+ceres_test(ba_iterschur_eigensparse_clusttri_user)
+ceres_test(ba_iterschur_eigensparse_clusttri_user_threads)
+ceres_test(ba_iterschur_cxsparse_clusttri_auto)
+ceres_test(ba_iterschur_cxsparse_clusttri_auto_threads)
+ceres_test(ba_iterschur_cxsparse_clusttri_user)
+ceres_test(ba_iterschur_cxsparse_clusttri_user_threads)
+ceres_test(ba_iterschur_acceleratesparse_clusttri_auto)
+ceres_test(ba_iterschur_acceleratesparse_clusttri_auto_threads)
+ceres_test(ba_iterschur_acceleratesparse_clusttri_user)
+ceres_test(ba_iterschur_acceleratesparse_clusttri_user_threads)
+ceres_test(ba_sparsecholesky_suitesparse_auto)
+ceres_test(ba_sparsecholesky_suitesparse_auto_threads)
+ceres_test(ba_sparsecholesky_suitesparse_user)
+ceres_test(ba_sparsecholesky_suitesparse_user_threads)
+ceres_test(ba_sparsecholesky_eigensparse_auto)
+ceres_test(ba_sparsecholesky_eigensparse_auto_threads)
+ceres_test(ba_sparsecholesky_eigensparse_user)
+ceres_test(ba_sparsecholesky_eigensparse_user_threads)
+ceres_test(ba_sparsecholesky_cxsparse_auto)
+ceres_test(ba_sparsecholesky_cxsparse_auto_threads)
+ceres_test(ba_sparsecholesky_cxsparse_user)
+ceres_test(ba_sparsecholesky_cxsparse_user_threads)
+ceres_test(ba_sparsecholesky_acceleratesparse_auto)
+ceres_test(ba_sparsecholesky_acceleratesparse_auto_threads)
+ceres_test(ba_sparsecholesky_acceleratesparse_user)
+ceres_test(ba_sparsecholesky_acceleratesparse_user_threads)
+ceres_test(ba_sparseschur_suitesparse_auto)
+ceres_test(ba_sparseschur_suitesparse_auto_threads)
+ceres_test(ba_sparseschur_suitesparse_user)
+ceres_test(ba_sparseschur_suitesparse_user_threads)
+ceres_test(ba_sparseschur_eigensparse_auto)
+ceres_test(ba_sparseschur_eigensparse_auto_threads)
+ceres_test(ba_sparseschur_eigensparse_user)
+ceres_test(ba_sparseschur_eigensparse_user_threads)
+ceres_test(ba_sparseschur_cxsparse_auto)
+ceres_test(ba_sparseschur_cxsparse_auto_threads)
+ceres_test(ba_sparseschur_cxsparse_user)
+ceres_test(ba_sparseschur_cxsparse_user_threads)
+ceres_test(ba_sparseschur_acceleratesparse_auto)
+ceres_test(ba_sparseschur_acceleratesparse_auto_threads)
+ceres_test(ba_sparseschur_acceleratesparse_user)
+ceres_test(ba_sparseschur_acceleratesparse_user_threads)
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_denseschur_auto_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_denseschur_auto_test.cc
new file mode 100644
index 0000000..642e9f2
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_denseschur_auto_test.cc
@@ -0,0 +1,61 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       DenseSchur_AutomaticOrdering) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 1;
+   options->linear_solver_type = DENSE_SCHUR;
+   options->sparse_linear_algebra_library_type = NO_SPARSE;
+   options->preconditioner_type = IDENTITY;
+   if (kAutomaticOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_denseschur_auto_threads_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_denseschur_auto_threads_test.cc
new file mode 100644
index 0000000..10b19b7
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_denseschur_auto_threads_test.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_THREADS
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       DenseSchur_AutomaticOrdering_Threads) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 4;
+   options->linear_solver_type = DENSE_SCHUR;
+   options->sparse_linear_algebra_library_type = NO_SPARSE;
+   options->preconditioner_type = IDENTITY;
+   if (kAutomaticOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_denseschur_user_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_denseschur_user_test.cc
new file mode 100644
index 0000000..f852a46
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_denseschur_user_test.cc
@@ -0,0 +1,61 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       DenseSchur_UserOrdering) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 1;
+   options->linear_solver_type = DENSE_SCHUR;
+   options->sparse_linear_algebra_library_type = NO_SPARSE;
+   options->preconditioner_type = IDENTITY;
+   if (kUserOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_denseschur_user_threads_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_denseschur_user_threads_test.cc
new file mode 100644
index 0000000..324998f
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_denseschur_user_threads_test.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_THREADS
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       DenseSchur_UserOrdering_Threads) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 4;
+   options->linear_solver_type = DENSE_SCHUR;
+   options->sparse_linear_algebra_library_type = NO_SPARSE;
+   options->preconditioner_type = IDENTITY;
+   if (kUserOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_acceleratesparse_clustjacobi_auto_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_acceleratesparse_clustjacobi_auto_test.cc
new file mode 100644
index 0000000..30443af
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_acceleratesparse_clustjacobi_auto_test.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_ACCELERATE_SPARSE
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_AccelerateSparse_ClusterJacobi_AutomaticOrdering) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 1;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = ACCELERATE_SPARSE;
+   options->preconditioner_type = CLUSTER_JACOBI;
+   if (kAutomaticOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_ACCELERATE_SPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_acceleratesparse_clustjacobi_auto_threads_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_acceleratesparse_clustjacobi_auto_threads_test.cc
new file mode 100644
index 0000000..f61e1d6
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_acceleratesparse_clustjacobi_auto_threads_test.cc
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_ACCELERATE_SPARSE
+#ifndef CERES_NO_THREADS
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_AccelerateSparse_ClusterJacobi_AutomaticOrdering_Threads) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 4;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = ACCELERATE_SPARSE;
+   options->preconditioner_type = CLUSTER_JACOBI;
+   if (kAutomaticOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
+#endif  // CERES_NO_ACCELERATE_SPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_acceleratesparse_clustjacobi_user_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_acceleratesparse_clustjacobi_user_test.cc
new file mode 100644
index 0000000..6de1e4b
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_acceleratesparse_clustjacobi_user_test.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_ACCELERATE_SPARSE
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_AccelerateSparse_ClusterJacobi_UserOrdering) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 1;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = ACCELERATE_SPARSE;
+   options->preconditioner_type = CLUSTER_JACOBI;
+   if (kUserOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_ACCELERATE_SPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_acceleratesparse_clustjacobi_user_threads_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_acceleratesparse_clustjacobi_user_threads_test.cc
new file mode 100644
index 0000000..74cb674
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_acceleratesparse_clustjacobi_user_threads_test.cc
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_ACCELERATE_SPARSE
+#ifndef CERES_NO_THREADS
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_AccelerateSparse_ClusterJacobi_UserOrdering_Threads) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 4;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = ACCELERATE_SPARSE;
+   options->preconditioner_type = CLUSTER_JACOBI;
+   if (kUserOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
+#endif  // CERES_NO_ACCELERATE_SPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_acceleratesparse_clusttri_auto_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_acceleratesparse_clusttri_auto_test.cc
new file mode 100644
index 0000000..4168466
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_acceleratesparse_clusttri_auto_test.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_ACCELERATE_SPARSE
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_AccelerateSparse_ClusterTridiagonal_AutomaticOrdering) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 1;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = ACCELERATE_SPARSE;
+   options->preconditioner_type = CLUSTER_TRIDIAGONAL;
+   if (kAutomaticOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_ACCELERATE_SPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_acceleratesparse_clusttri_auto_threads_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_acceleratesparse_clusttri_auto_threads_test.cc
new file mode 100644
index 0000000..11c962b
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_acceleratesparse_clusttri_auto_threads_test.cc
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_ACCELERATE_SPARSE
+#ifndef CERES_NO_THREADS
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_AccelerateSparse_ClusterTridiagonal_AutomaticOrdering_Threads) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 4;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = ACCELERATE_SPARSE;
+   options->preconditioner_type = CLUSTER_TRIDIAGONAL;
+   if (kAutomaticOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
+#endif  // CERES_NO_ACCELERATE_SPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_acceleratesparse_clusttri_user_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_acceleratesparse_clusttri_user_test.cc
new file mode 100644
index 0000000..b8cb11d
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_acceleratesparse_clusttri_user_test.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_ACCELERATE_SPARSE
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_AccelerateSparse_ClusterTridiagonal_UserOrdering) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 1;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = ACCELERATE_SPARSE;
+   options->preconditioner_type = CLUSTER_TRIDIAGONAL;
+   if (kUserOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_ACCELERATE_SPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_acceleratesparse_clusttri_user_threads_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_acceleratesparse_clusttri_user_threads_test.cc
new file mode 100644
index 0000000..ff2e8de
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_acceleratesparse_clusttri_user_threads_test.cc
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_ACCELERATE_SPARSE
+#ifndef CERES_NO_THREADS
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_AccelerateSparse_ClusterTridiagonal_UserOrdering_Threads) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 4;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = ACCELERATE_SPARSE;
+   options->preconditioner_type = CLUSTER_TRIDIAGONAL;
+   if (kUserOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
+#endif  // CERES_NO_ACCELERATE_SPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_cxsparse_clustjacobi_auto_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_cxsparse_clustjacobi_auto_test.cc
new file mode 100644
index 0000000..4ee23a5
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_cxsparse_clustjacobi_auto_test.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_CXSPARSE
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_CxSparse_ClusterJacobi_AutomaticOrdering) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 1;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = CX_SPARSE;
+   options->preconditioner_type = CLUSTER_JACOBI;
+   if (kAutomaticOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_CXSPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_cxsparse_clustjacobi_auto_threads_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_cxsparse_clustjacobi_auto_threads_test.cc
new file mode 100644
index 0000000..85c5d83
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_cxsparse_clustjacobi_auto_threads_test.cc
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_CXSPARSE
+#ifndef CERES_NO_THREADS
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_CxSparse_ClusterJacobi_AutomaticOrdering_Threads) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 4;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = CX_SPARSE;
+   options->preconditioner_type = CLUSTER_JACOBI;
+   if (kAutomaticOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
+#endif  // CERES_NO_CXSPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_cxsparse_clustjacobi_user_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_cxsparse_clustjacobi_user_test.cc
new file mode 100644
index 0000000..5b03651
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_cxsparse_clustjacobi_user_test.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_CXSPARSE
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_CxSparse_ClusterJacobi_UserOrdering) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 1;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = CX_SPARSE;
+   options->preconditioner_type = CLUSTER_JACOBI;
+   if (kUserOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_CXSPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_cxsparse_clustjacobi_user_threads_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_cxsparse_clustjacobi_user_threads_test.cc
new file mode 100644
index 0000000..e855875
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_cxsparse_clustjacobi_user_threads_test.cc
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_CXSPARSE
+#ifndef CERES_NO_THREADS
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_CxSparse_ClusterJacobi_UserOrdering_Threads) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 4;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = CX_SPARSE;
+   options->preconditioner_type = CLUSTER_JACOBI;
+   if (kUserOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
+#endif  // CERES_NO_CXSPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_cxsparse_clusttri_auto_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_cxsparse_clusttri_auto_test.cc
new file mode 100644
index 0000000..acc6e0d
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_cxsparse_clusttri_auto_test.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_CXSPARSE
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_CxSparse_ClusterTridiagonal_AutomaticOrdering) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 1;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = CX_SPARSE;
+   options->preconditioner_type = CLUSTER_TRIDIAGONAL;
+   if (kAutomaticOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_CXSPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_cxsparse_clusttri_auto_threads_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_cxsparse_clusttri_auto_threads_test.cc
new file mode 100644
index 0000000..62c9924
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_cxsparse_clusttri_auto_threads_test.cc
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_CXSPARSE
+#ifndef CERES_NO_THREADS
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_CxSparse_ClusterTridiagonal_AutomaticOrdering_Threads) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 4;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = CX_SPARSE;
+   options->preconditioner_type = CLUSTER_TRIDIAGONAL;
+   if (kAutomaticOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
+#endif  // CERES_NO_CXSPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_cxsparse_clusttri_user_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_cxsparse_clusttri_user_test.cc
new file mode 100644
index 0000000..bd86d9c
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_cxsparse_clusttri_user_test.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_CXSPARSE
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_CxSparse_ClusterTridiagonal_UserOrdering) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 1;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = CX_SPARSE;
+   options->preconditioner_type = CLUSTER_TRIDIAGONAL;
+   if (kUserOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_CXSPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_cxsparse_clusttri_user_threads_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_cxsparse_clusttri_user_threads_test.cc
new file mode 100644
index 0000000..ea53a6e
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_cxsparse_clusttri_user_threads_test.cc
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_CXSPARSE
+#ifndef CERES_NO_THREADS
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_CxSparse_ClusterTridiagonal_UserOrdering_Threads) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 4;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = CX_SPARSE;
+   options->preconditioner_type = CLUSTER_TRIDIAGONAL;
+   if (kUserOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
+#endif  // CERES_NO_CXSPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_eigensparse_clustjacobi_auto_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_eigensparse_clustjacobi_auto_test.cc
new file mode 100644
index 0000000..78cd02e
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_eigensparse_clustjacobi_auto_test.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifdef CERES_USE_EIGEN_SPARSE
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_EigenSparse_ClusterJacobi_AutomaticOrdering) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 1;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = EIGEN_SPARSE;
+   options->preconditioner_type = CLUSTER_JACOBI;
+   if (kAutomaticOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_USE_EIGEN_SPARSE
+
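Note the guard asymmetry between the two sparse backends exercised by these generated tests: the CXSparse-backed files are compiled out only when CERES_NO_CXSPARSE is defined (opt-out), whereas the Eigen-sparse-backed files are compiled in only when CERES_USE_EIGEN_SPARSE is defined (opt-in). Schematically:

  // CXSparse configuration: present unless the build disables CXSparse.
  #ifndef CERES_NO_CXSPARSE
  // ... test body ...
  #endif  // CERES_NO_CXSPARSE

  // Eigen-sparse configuration: present only when the build enables Eigen's
  // sparse support.
  #ifdef CERES_USE_EIGEN_SPARSE
  // ... test body ...
  #endif  // CERES_USE_EIGEN_SPARSE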
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_eigensparse_clustjacobi_auto_threads_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_eigensparse_clustjacobi_auto_threads_test.cc
new file mode 100644
index 0000000..83693b1
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_eigensparse_clustjacobi_auto_threads_test.cc
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifdef CERES_USE_EIGEN_SPARSE
+#ifndef CERES_NO_THREADS
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_EigenSparse_ClusterJacobi_AutomaticOrdering_Threads) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 4;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = EIGEN_SPARSE;
+   options->preconditioner_type = CLUSTER_JACOBI;
+   if (kAutomaticOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
+#endif  // CERES_USE_EIGEN_SPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_eigensparse_clustjacobi_user_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_eigensparse_clustjacobi_user_test.cc
new file mode 100644
index 0000000..dcc42cd
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_eigensparse_clustjacobi_user_test.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifdef CERES_USE_EIGEN_SPARSE
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_EigenSparse_ClusterJacobi_UserOrdering) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 1;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = EIGEN_SPARSE;
+   options->preconditioner_type = CLUSTER_JACOBI;
+   if (kUserOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_USE_EIGEN_SPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_eigensparse_clustjacobi_user_threads_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_eigensparse_clustjacobi_user_threads_test.cc
new file mode 100644
index 0000000..8566dce
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_eigensparse_clustjacobi_user_threads_test.cc
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifdef CERES_USE_EIGEN_SPARSE
+#ifndef CERES_NO_THREADS
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_EigenSparse_ClusterJacobi_UserOrdering_Threads) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 4;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = EIGEN_SPARSE;
+   options->preconditioner_type = CLUSTER_JACOBI;
+   if (kUserOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
+#endif  // CERES_USE_EIGEN_SPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_eigensparse_clusttri_auto_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_eigensparse_clusttri_auto_test.cc
new file mode 100644
index 0000000..aef25fe
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_eigensparse_clusttri_auto_test.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifdef CERES_USE_EIGEN_SPARSE
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_EigenSparse_ClusterTridiagonal_AutomaticOrdering) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 1;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = EIGEN_SPARSE;
+   options->preconditioner_type = CLUSTER_TRIDIAGONAL;
+   if (kAutomaticOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_USE_EIGEN_SPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_eigensparse_clusttri_auto_threads_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_eigensparse_clusttri_auto_threads_test.cc
new file mode 100644
index 0000000..58389c6
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_eigensparse_clusttri_auto_threads_test.cc
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifdef CERES_USE_EIGEN_SPARSE
+#ifndef CERES_NO_THREADS
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_EigenSparse_ClusterTridiagonal_AutomaticOrdering_Threads) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 4;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = EIGEN_SPARSE;
+   options->preconditioner_type = CLUSTER_TRIDIAGONAL;
+   if (kAutomaticOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
+#endif  // CERES_USE_EIGEN_SPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_eigensparse_clusttri_user_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_eigensparse_clusttri_user_test.cc
new file mode 100644
index 0000000..eee6da0
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_eigensparse_clusttri_user_test.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifdef CERES_USE_EIGEN_SPARSE
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_EigenSparse_ClusterTridiagonal_UserOrdering) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 1;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = EIGEN_SPARSE;
+   options->preconditioner_type = CLUSTER_TRIDIAGONAL;
+   if (kUserOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_USE_EIGEN_SPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_eigensparse_clusttri_user_threads_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_eigensparse_clusttri_user_threads_test.cc
new file mode 100644
index 0000000..826239b
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_eigensparse_clusttri_user_threads_test.cc
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifdef CERES_USE_EIGEN_SPARSE
+#ifndef CERES_NO_THREADS
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_EigenSparse_ClusterTridiagonal_UserOrdering_Threads) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 4;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = EIGEN_SPARSE;
+   options->preconditioner_type = CLUSTER_TRIDIAGONAL;
+   if (kUserOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
+#endif  // CERES_USE_EIGEN_SPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_jacobi_auto_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_jacobi_auto_test.cc
new file mode 100644
index 0000000..e59be90
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_jacobi_auto_test.cc
@@ -0,0 +1,61 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_Jacobi_AutomaticOrdering) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 1;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = NO_SPARSE;
+   options->preconditioner_type = JACOBI;
+   if (kAutomaticOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
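The JACOBI-preconditioned test above and the remaining JACOBI/SCHUR_JACOBI variants below need no sparse backend, so they set NO_SPARSE and carry no backend guard at all; their *_threads_test.cc counterparts differ only in wrapping the test in a CERES_NO_THREADS guard and raising the thread count. Schematically, the only lines that change relative to the single-threaded file above are:

  #ifndef CERES_NO_THREADS
  // ... identical test body, except:
  options->num_threads = 4;  // instead of 1
  // ...
  #endif  // CERES_NO_THREADS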
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_jacobi_auto_threads_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_jacobi_auto_threads_test.cc
new file mode 100644
index 0000000..7bcdad2
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_jacobi_auto_threads_test.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_THREADS
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_Jacobi_AutomaticOrdering_Threads) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 4;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = NO_SPARSE;
+   options->preconditioner_type = JACOBI;
+   if (kAutomaticOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_jacobi_user_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_jacobi_user_test.cc
new file mode 100644
index 0000000..c58f2fd
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_jacobi_user_test.cc
@@ -0,0 +1,61 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_Jacobi_UserOrdering) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 1;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = NO_SPARSE;
+   options->preconditioner_type = JACOBI;
+   if (kUserOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_jacobi_user_threads_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_jacobi_user_threads_test.cc
new file mode 100644
index 0000000..ef73824
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_jacobi_user_threads_test.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_THREADS
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_Jacobi_UserOrdering_Threads) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 4;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = NO_SPARSE;
+   options->preconditioner_type = JACOBI;
+   if (kUserOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_schurjacobi_auto_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_schurjacobi_auto_test.cc
new file mode 100644
index 0000000..0f775e1
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_schurjacobi_auto_test.cc
@@ -0,0 +1,61 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_SchurJacobi_AutomaticOrdering) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 1;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = NO_SPARSE;
+   options->preconditioner_type = SCHUR_JACOBI;
+   if (kAutomaticOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
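The generated test above only fills in Solver::Options and hands the fixture's pre-built problem to RunSolverForConfigAndExpectResidualsMatch. For reference, a minimal standalone sketch of the same ITERATIVE_SCHUR + SCHUR_JACOBI + NO_SPARSE configuration might look as follows; the ToyReprojectionError functor, its constants, and main() are invented purely for illustration, and only the option values mirror the test.

#include <iostream>
#include <memory>

#include "ceres/ceres.h"

// Toy residual tying a 2-parameter "camera" block to a 2-parameter "point"
// block, so the Schur-based solver has a point block to eliminate.  This
// functor and its constants are illustrative only.
struct ToyReprojectionError {
  template <typename T>
  bool operator()(const T* const camera, const T* const point,
                  T* residuals) const {
    residuals[0] = camera[0] + point[0] - T(1.0);
    residuals[1] = camera[1] + point[1] - T(2.0);
    return true;
  }
};

int main() {
  double camera[2] = {0.0, 0.0};
  double point[2] = {0.0, 0.0};

  ceres::Problem problem;
  problem.AddResidualBlock(
      new ceres::AutoDiffCostFunction<ToyReprojectionError, 2, 2, 2>(
          new ToyReprojectionError),
      nullptr, camera, point);

  // Same option values as the generated test above.
  ceres::Solver::Options options;
  options.num_threads = 1;
  options.linear_solver_type = ceres::ITERATIVE_SCHUR;
  options.sparse_linear_algebra_library_type = ceres::NO_SPARSE;
  options.preconditioner_type = ceres::SCHUR_JACOBI;

  // Explicit elimination ordering, as exercised by the *_user_* variants:
  // points in group 0 are eliminated before cameras in group 1.
  options.linear_solver_ordering =
      std::make_shared<ceres::ParameterBlockOrdering>();
  options.linear_solver_ordering->AddElementToGroup(point, 0);
  options.linear_solver_ordering->AddElementToGroup(camera, 1);

  ceres::Solver::Summary summary;
  ceres::Solve(options, &problem, &summary);
  std::cout << summary.BriefReport() << "\n";
  return 0;
}

Dropping the ParameterBlockOrdering lines (leaving linear_solver_ordering null) lets Ceres compute the elimination ordering itself, which is the state the automatic-ordering tests reach by calling linear_solver_ordering.reset() on whatever ordering the fixture presumably installs.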
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_schurjacobi_auto_threads_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_schurjacobi_auto_threads_test.cc
new file mode 100644
index 0000000..9d91c81
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_schurjacobi_auto_threads_test.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_THREADS
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_SchurJacobi_AutomaticOrdering_Threads) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 4;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = NO_SPARSE;
+   options->preconditioner_type = SCHUR_JACOBI;
+   if (kAutomaticOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_schurjacobi_user_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_schurjacobi_user_test.cc
new file mode 100644
index 0000000..564104c
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_schurjacobi_user_test.cc
@@ -0,0 +1,61 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_SchurJacobi_UserOrdering) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 1;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = NO_SPARSE;
+   options->preconditioner_type = SCHUR_JACOBI;
+   if (kUserOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_schurjacobi_user_threads_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_schurjacobi_user_threads_test.cc
new file mode 100644
index 0000000..f37de7a
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_schurjacobi_user_threads_test.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_THREADS
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_SchurJacobi_UserOrdering_Threads) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 4;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = NO_SPARSE;
+   options->preconditioner_type = SCHUR_JACOBI;
+   if (kUserOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_suitesparse_clustjacobi_auto_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_suitesparse_clustjacobi_auto_test.cc
new file mode 100644
index 0000000..71b774e
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_suitesparse_clustjacobi_auto_test.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_SUITESPARSE
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_SuiteSparse_ClusterJacobi_AutomaticOrdering) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 1;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = SUITE_SPARSE;
+   options->preconditioner_type = CLUSTER_JACOBI;
+   if (kAutomaticOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_SUITESPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_suitesparse_clustjacobi_auto_threads_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_suitesparse_clustjacobi_auto_threads_test.cc
new file mode 100644
index 0000000..23706a6
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_suitesparse_clustjacobi_auto_threads_test.cc
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_SUITESPARSE
+#ifndef CERES_NO_THREADS
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_SuiteSparse_ClusterJacobi_AutomaticOrdering_Threads) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 4;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = SUITE_SPARSE;
+   options->preconditioner_type = CLUSTER_JACOBI;
+   if (kAutomaticOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
+#endif  // CERES_NO_SUITESPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_suitesparse_clustjacobi_user_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_suitesparse_clustjacobi_user_test.cc
new file mode 100644
index 0000000..e2a3bb2
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_suitesparse_clustjacobi_user_test.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_SUITESPARSE
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_SuiteSparse_ClusterJacobi_UserOrdering) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 1;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = SUITE_SPARSE;
+   options->preconditioner_type = CLUSTER_JACOBI;
+   if (kUserOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_SUITESPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_suitesparse_clustjacobi_user_threads_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_suitesparse_clustjacobi_user_threads_test.cc
new file mode 100644
index 0000000..05b270c
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_suitesparse_clustjacobi_user_threads_test.cc
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_SUITESPARSE
+#ifndef CERES_NO_THREADS
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_SuiteSparse_ClusterJacobi_UserOrdering_Threads) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 4;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = SUITE_SPARSE;
+   options->preconditioner_type = CLUSTER_JACOBI;
+   if (kUserOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
+#endif  // CERES_NO_SUITESPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_suitesparse_clusttri_auto_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_suitesparse_clusttri_auto_test.cc
new file mode 100644
index 0000000..04ad42e
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_suitesparse_clusttri_auto_test.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_SUITESPARSE
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_SuiteSparse_ClusterTridiagonal_AutomaticOrdering) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 1;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = SUITE_SPARSE;
+   options->preconditioner_type = CLUSTER_TRIDIAGONAL;
+   if (kAutomaticOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_SUITESPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_suitesparse_clusttri_auto_threads_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_suitesparse_clusttri_auto_threads_test.cc
new file mode 100644
index 0000000..2164c11
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_suitesparse_clusttri_auto_threads_test.cc
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_SUITESPARSE
+#ifndef CERES_NO_THREADS
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_SuiteSparse_ClusterTridiagonal_AutomaticOrdering_Threads) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 4;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = SUITE_SPARSE;
+   options->preconditioner_type = CLUSTER_TRIDIAGONAL;
+   if (kAutomaticOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
+#endif  // CERES_NO_SUITESPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_suitesparse_clusttri_user_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_suitesparse_clusttri_user_test.cc
new file mode 100644
index 0000000..a0c213c
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_suitesparse_clusttri_user_test.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_SUITESPARSE
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_SuiteSparse_ClusterTridiagonal_UserOrdering) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 1;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = SUITE_SPARSE;
+   options->preconditioner_type = CLUSTER_TRIDIAGONAL;
+   if (kUserOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_SUITESPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_suitesparse_clusttri_user_threads_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_suitesparse_clusttri_user_threads_test.cc
new file mode 100644
index 0000000..15eb761
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_iterschur_suitesparse_clusttri_user_threads_test.cc
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_SUITESPARSE
+#ifndef CERES_NO_THREADS
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       IterativeSchur_SuiteSparse_ClusterTridiagonal_UserOrdering_Threads) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 4;
+   options->linear_solver_type = ITERATIVE_SCHUR;
+   options->sparse_linear_algebra_library_type = SUITE_SPARSE;
+   options->preconditioner_type = CLUSTER_TRIDIAGONAL;
+   if (kUserOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
+#endif  // CERES_NO_SUITESPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_acceleratesparse_auto_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_acceleratesparse_auto_test.cc
new file mode 100644
index 0000000..4683d2b
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_acceleratesparse_auto_test.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_ACCELERATE_SPARSE
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       SparseNormalCholesky_AccelerateSparse_AutomaticOrdering) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 1;
+   options->linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+   options->sparse_linear_algebra_library_type = ACCELERATE_SPARSE;
+   options->preconditioner_type = IDENTITY;
+   if (kAutomaticOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_ACCELERATE_SPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_acceleratesparse_auto_threads_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_acceleratesparse_auto_threads_test.cc
new file mode 100644
index 0000000..1b63415
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_acceleratesparse_auto_threads_test.cc
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_ACCELERATE_SPARSE
+#ifndef CERES_NO_THREADS
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       SparseNormalCholesky_AccelerateSparse_AutomaticOrdering_Threads) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 4;
+   options->linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+   options->sparse_linear_algebra_library_type = ACCELERATE_SPARSE;
+   options->preconditioner_type = IDENTITY;
+   if (kAutomaticOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
+#endif  // CERES_NO_ACCELERATE_SPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_acceleratesparse_user_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_acceleratesparse_user_test.cc
new file mode 100644
index 0000000..bdc7d62
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_acceleratesparse_user_test.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_ACCELERATE_SPARSE
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       SparseNormalCholesky_AccelerateSparse_UserOrdering) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 1;
+   options->linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+   options->sparse_linear_algebra_library_type = ACCELERATE_SPARSE;
+   options->preconditioner_type = IDENTITY;
+   if (kUserOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_ACCELERATE_SPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_acceleratesparse_user_threads_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_acceleratesparse_user_threads_test.cc
new file mode 100644
index 0000000..36567f1
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_acceleratesparse_user_threads_test.cc
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_ACCELERATE_SPARSE
+#ifndef CERES_NO_THREADS
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       SparseNormalCholesky_AccelerateSparse_UserOrdering_Threads) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 4;
+   options->linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+   options->sparse_linear_algebra_library_type = ACCELERATE_SPARSE;
+   options->preconditioner_type = IDENTITY;
+   if (kUserOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
+#endif  // CERES_NO_ACCELERATE_SPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_cxsparse_auto_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_cxsparse_auto_test.cc
new file mode 100644
index 0000000..b47daef
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_cxsparse_auto_test.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_CXSPARSE
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       SparseNormalCholesky_CxSparse_AutomaticOrdering) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 1;
+   options->linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+   options->sparse_linear_algebra_library_type = CX_SPARSE;
+   options->preconditioner_type = IDENTITY;
+   if (kAutomaticOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_CXSPARSE
+
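The SparseNormalCholesky_* test files differ only in the sparse backend enum they select and in which CERES_NO_* guard wraps the test. A compile-time selection helper using the same guards might be sketched as follows; PickSparseBackend and ConfigureSparseNormalCholesky are hypothetical names, not part of Ceres or of these generated tests, and the EIGEN_SPARSE fallback assumes the build defines CERES_USE_EIGEN_SPARSE.

#include "ceres/ceres.h"

// Hypothetical helper: choose a sparse backend for SPARSE_NORMAL_CHOLESKY
// using the same CERES_NO_* guards that gate the generated tests.
ceres::SparseLinearAlgebraLibraryType PickSparseBackend() {
#if !defined(CERES_NO_SUITESPARSE)
  return ceres::SUITE_SPARSE;
#elif !defined(CERES_NO_ACCELERATE_SPARSE)
  return ceres::ACCELERATE_SPARSE;
#elif !defined(CERES_NO_CXSPARSE)
  return ceres::CX_SPARSE;
#else
  return ceres::EIGEN_SPARSE;  // Assumes CERES_USE_EIGEN_SPARSE is enabled.
#endif
}

// Applies the same option values the SparseNormalCholesky_* tests set, with
// the backend resolved at compile time instead of one test file per backend.
void ConfigureSparseNormalCholesky(ceres::Solver::Options* options) {
  options->linear_solver_type = ceres::SPARSE_NORMAL_CHOLESKY;
  options->sparse_linear_algebra_library_type = PickSparseBackend();
  options->preconditioner_type = ceres::IDENTITY;
  options->num_threads = 1;  // The *_threads_test.cc variants use 4.
}

A binary built this way compiles exactly one branch of the #if chain, which is the effect the per-backend #ifndef wrappers above achieve one test file at a time.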
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_cxsparse_auto_threads_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_cxsparse_auto_threads_test.cc
new file mode 100644
index 0000000..22d1002
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_cxsparse_auto_threads_test.cc
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_CXSPARSE
+#ifndef CERES_NO_THREADS
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       SparseNormalCholesky_CxSparse_AutomaticOrdering_Threads) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 4;
+   options->linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+   options->sparse_linear_algebra_library_type = CX_SPARSE;
+   options->preconditioner_type = IDENTITY;
+   if (kAutomaticOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
+#endif  // CERES_NO_CXSPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_cxsparse_user_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_cxsparse_user_test.cc
new file mode 100644
index 0000000..e0c5268
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_cxsparse_user_test.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_CXSPARSE
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       SparseNormalCholesky_CxSparse_UserOrdering) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 1;
+   options->linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+   options->sparse_linear_algebra_library_type = CX_SPARSE;
+   options->preconditioner_type = IDENTITY;
+   if (kUserOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_CXSPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_cxsparse_user_threads_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_cxsparse_user_threads_test.cc
new file mode 100644
index 0000000..2ea905a
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_cxsparse_user_threads_test.cc
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_CXSPARSE
+#ifndef CERES_NO_THREADS
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       SparseNormalCholesky_CxSparse_UserOrdering_Threads) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 4;
+   options->linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+   options->sparse_linear_algebra_library_type = CX_SPARSE;
+   options->preconditioner_type = IDENTITY;
+   if (kUserOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
+#endif  // CERES_NO_CXSPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_eigensparse_auto_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_eigensparse_auto_test.cc
new file mode 100644
index 0000000..ce502d6
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_eigensparse_auto_test.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifdef CERES_USE_EIGEN_SPARSE
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       SparseNormalCholesky_EigenSparse_AutomaticOrdering) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 1;
+   options->linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+   options->sparse_linear_algebra_library_type = EIGEN_SPARSE;
+   options->preconditioner_type = IDENTITY;
+   if (kAutomaticOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_USE_EIGEN_SPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_eigensparse_auto_threads_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_eigensparse_auto_threads_test.cc
new file mode 100644
index 0000000..b91a6d4
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_eigensparse_auto_threads_test.cc
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifdef CERES_USE_EIGEN_SPARSE
+#ifndef CERES_NO_THREADS
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       SparseNormalCholesky_EigenSparse_AutomaticOrdering_Threads) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 4;
+   options->linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+   options->sparse_linear_algebra_library_type = EIGEN_SPARSE;
+   options->preconditioner_type = IDENTITY;
+   if (kAutomaticOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
+#endif  // CERES_USE_EIGEN_SPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_eigensparse_user_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_eigensparse_user_test.cc
new file mode 100644
index 0000000..e17caf6
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_eigensparse_user_test.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifdef CERES_USE_EIGEN_SPARSE
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       SparseNormalCholesky_EigenSparse_UserOrdering) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 1;
+   options->linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+   options->sparse_linear_algebra_library_type = EIGEN_SPARSE;
+   options->preconditioner_type = IDENTITY;
+   if (kUserOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_USE_EIGEN_SPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_eigensparse_user_threads_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_eigensparse_user_threads_test.cc
new file mode 100644
index 0000000..b17f480
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_eigensparse_user_threads_test.cc
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifdef CERES_USE_EIGEN_SPARSE
+#ifndef CERES_NO_THREADS
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       SparseNormalCholesky_EigenSparse_UserOrdering_Threads) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 4;
+   options->linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+   options->sparse_linear_algebra_library_type = EIGEN_SPARSE;
+   options->preconditioner_type = IDENTITY;
+   if (kUserOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
+#endif  // CERES_USE_EIGEN_SPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_suitesparse_auto_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_suitesparse_auto_test.cc
new file mode 100644
index 0000000..e114a51
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_suitesparse_auto_test.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_SUITESPARSE
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       SparseNormalCholesky_SuiteSparse_AutomaticOrdering) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 1;
+   options->linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+   options->sparse_linear_algebra_library_type = SUITE_SPARSE;
+   options->preconditioner_type = IDENTITY;
+   if (kAutomaticOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_SUITESPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_suitesparse_auto_threads_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_suitesparse_auto_threads_test.cc
new file mode 100644
index 0000000..3340f2f
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_suitesparse_auto_threads_test.cc
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_SUITESPARSE
+#ifndef CERES_NO_THREADS
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       SparseNormalCholesky_SuiteSparse_AutomaticOrdering_Threads) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 4;
+   options->linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+   options->sparse_linear_algebra_library_type = SUITE_SPARSE;
+   options->preconditioner_type = IDENTITY;
+   if (kAutomaticOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
+#endif  // CERES_NO_SUITESPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_suitesparse_user_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_suitesparse_user_test.cc
new file mode 100644
index 0000000..8998e90
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_suitesparse_user_test.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_SUITESPARSE
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       SparseNormalCholesky_SuiteSparse_UserOrdering) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 1;
+   options->linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+   options->sparse_linear_algebra_library_type = SUITE_SPARSE;
+   options->preconditioner_type = IDENTITY;
+   if (kUserOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_SUITESPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_suitesparse_user_threads_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_suitesparse_user_threads_test.cc
new file mode 100644
index 0000000..b13e26d
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_sparsecholesky_suitesparse_user_threads_test.cc
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_SUITESPARSE
+#ifndef CERES_NO_THREADS
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       SparseNormalCholesky_SuiteSparse_UserOrdering_Threads) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 4;
+   options->linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+   options->sparse_linear_algebra_library_type = SUITE_SPARSE;
+   options->preconditioner_type = IDENTITY;
+   if (kUserOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
+#endif  // CERES_NO_SUITESPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_acceleratesparse_auto_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_acceleratesparse_auto_test.cc
new file mode 100644
index 0000000..b43a26a
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_acceleratesparse_auto_test.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_ACCELERATE_SPARSE
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       SparseSchur_AccelerateSparse_AutomaticOrdering) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 1;
+   options->linear_solver_type = SPARSE_SCHUR;
+   options->sparse_linear_algebra_library_type = ACCELERATE_SPARSE;
+   options->preconditioner_type = IDENTITY;
+   if (kAutomaticOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_ACCELERATE_SPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_acceleratesparse_auto_threads_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_acceleratesparse_auto_threads_test.cc
new file mode 100644
index 0000000..7e330c4
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_acceleratesparse_auto_threads_test.cc
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_ACCELERATE_SPARSE
+#ifndef CERES_NO_THREADS
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       SparseSchur_AccelerateSparse_AutomaticOrdering_Threads) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 4;
+   options->linear_solver_type = SPARSE_SCHUR;
+   options->sparse_linear_algebra_library_type = ACCELERATE_SPARSE;
+   options->preconditioner_type = IDENTITY;
+   if (kAutomaticOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
+#endif  // CERES_NO_ACCELERATE_SPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_acceleratesparse_user_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_acceleratesparse_user_test.cc
new file mode 100644
index 0000000..b5738b7
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_acceleratesparse_user_test.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_ACCELERATE_SPARSE
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       SparseSchur_AccelerateSparse_UserOrdering) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 1;
+   options->linear_solver_type = SPARSE_SCHUR;
+   options->sparse_linear_algebra_library_type = ACCELERATE_SPARSE;
+   options->preconditioner_type = IDENTITY;
+   if (kUserOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_ACCELERATE_SPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_acceleratesparse_user_threads_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_acceleratesparse_user_threads_test.cc
new file mode 100644
index 0000000..b5c6105
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_acceleratesparse_user_threads_test.cc
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_ACCELERATE_SPARSE
+#ifndef CERES_NO_THREADS
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       SparseSchur_AccelerateSparse_UserOrdering_Threads) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 4;
+   options->linear_solver_type = SPARSE_SCHUR;
+   options->sparse_linear_algebra_library_type = ACCELERATE_SPARSE;
+   options->preconditioner_type = IDENTITY;
+   if (kUserOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
+#endif  // CERES_NO_ACCELERATE_SPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_cxsparse_auto_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_cxsparse_auto_test.cc
new file mode 100644
index 0000000..6d9fb7c
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_cxsparse_auto_test.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_CXSPARSE
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       SparseSchur_CxSparse_AutomaticOrdering) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 1;
+   options->linear_solver_type = SPARSE_SCHUR;
+   options->sparse_linear_algebra_library_type = CX_SPARSE;
+   options->preconditioner_type = IDENTITY;
+   if (kAutomaticOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_CXSPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_cxsparse_auto_threads_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_cxsparse_auto_threads_test.cc
new file mode 100644
index 0000000..69e8cce
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_cxsparse_auto_threads_test.cc
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_CXSPARSE
+#ifndef CERES_NO_THREADS
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       SparseSchur_CxSparse_AutomaticOrdering_Threads) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 4;
+   options->linear_solver_type = SPARSE_SCHUR;
+   options->sparse_linear_algebra_library_type = CX_SPARSE;
+   options->preconditioner_type = IDENTITY;
+   if (kAutomaticOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
+#endif  // CERES_NO_CXSPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_cxsparse_user_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_cxsparse_user_test.cc
new file mode 100644
index 0000000..f60b7c1
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_cxsparse_user_test.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_CXSPARSE
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       SparseSchur_CxSparse_UserOrdering) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 1;
+   options->linear_solver_type = SPARSE_SCHUR;
+   options->sparse_linear_algebra_library_type = CX_SPARSE;
+   options->preconditioner_type = IDENTITY;
+   if (kUserOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_CXSPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_cxsparse_user_threads_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_cxsparse_user_threads_test.cc
new file mode 100644
index 0000000..cfb9798
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_cxsparse_user_threads_test.cc
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_CXSPARSE
+#ifndef CERES_NO_THREADS
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       SparseSchur_CxSparse_UserOrdering_Threads) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 4;
+   options->linear_solver_type = SPARSE_SCHUR;
+   options->sparse_linear_algebra_library_type = CX_SPARSE;
+   options->preconditioner_type = IDENTITY;
+   if (kUserOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
+#endif  // CERES_NO_CXSPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_eigensparse_auto_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_eigensparse_auto_test.cc
new file mode 100644
index 0000000..a143591
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_eigensparse_auto_test.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifdef CERES_USE_EIGEN_SPARSE
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       SparseSchur_EigenSparse_AutomaticOrdering) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 1;
+   options->linear_solver_type = SPARSE_SCHUR;
+   options->sparse_linear_algebra_library_type = EIGEN_SPARSE;
+   options->preconditioner_type = IDENTITY;
+   if (kAutomaticOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_USE_EIGEN_SPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_eigensparse_auto_threads_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_eigensparse_auto_threads_test.cc
new file mode 100644
index 0000000..111c2a4
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_eigensparse_auto_threads_test.cc
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifdef CERES_USE_EIGEN_SPARSE
+#ifndef CERES_NO_THREADS
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       SparseSchur_EigenSparse_AutomaticOrdering_Threads) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 4;
+   options->linear_solver_type = SPARSE_SCHUR;
+   options->sparse_linear_algebra_library_type = EIGEN_SPARSE;
+   options->preconditioner_type = IDENTITY;
+   if (kAutomaticOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
+#endif  // CERES_USE_EIGEN_SPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_eigensparse_user_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_eigensparse_user_test.cc
new file mode 100644
index 0000000..9ad242e
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_eigensparse_user_test.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifdef CERES_USE_EIGEN_SPARSE
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       SparseSchur_EigenSparse_UserOrdering) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 1;
+   options->linear_solver_type = SPARSE_SCHUR;
+   options->sparse_linear_algebra_library_type = EIGEN_SPARSE;
+   options->preconditioner_type = IDENTITY;
+   if (kUserOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_USE_EIGEN_SPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_eigensparse_user_threads_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_eigensparse_user_threads_test.cc
new file mode 100644
index 0000000..abd914d
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_eigensparse_user_threads_test.cc
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifdef CERES_USE_EIGEN_SPARSE
+#ifndef CERES_NO_THREADS
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       SparseSchur_EigenSparse_UserOrdering_Threads) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 4;
+   options->linear_solver_type = SPARSE_SCHUR;
+   options->sparse_linear_algebra_library_type = EIGEN_SPARSE;
+   options->preconditioner_type = IDENTITY;
+   if (kUserOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
+#endif  // CERES_USE_EIGEN_SPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_suitesparse_auto_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_suitesparse_auto_test.cc
new file mode 100644
index 0000000..21d0e50
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_suitesparse_auto_test.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_SUITESPARSE
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       SparseSchur_SuiteSparse_AutomaticOrdering) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 1;
+   options->linear_solver_type = SPARSE_SCHUR;
+   options->sparse_linear_algebra_library_type = SUITE_SPARSE;
+   options->preconditioner_type = IDENTITY;
+   if (kAutomaticOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_SUITESPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_suitesparse_auto_threads_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_suitesparse_auto_threads_test.cc
new file mode 100644
index 0000000..145eb9d
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_suitesparse_auto_threads_test.cc
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_SUITESPARSE
+#ifndef CERES_NO_THREADS
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       SparseSchur_SuiteSparse_AutomaticOrdering_Threads) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 4;
+   options->linear_solver_type = SPARSE_SCHUR;
+   options->sparse_linear_algebra_library_type = SUITE_SPARSE;
+   options->preconditioner_type = IDENTITY;
+   if (kAutomaticOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
+#endif  // CERES_NO_SUITESPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_suitesparse_user_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_suitesparse_user_test.cc
new file mode 100644
index 0000000..f73cc7d
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_suitesparse_user_test.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_SUITESPARSE
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       SparseSchur_SuiteSparse_UserOrdering) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 1;
+   options->linear_solver_type = SPARSE_SCHUR;
+   options->sparse_linear_algebra_library_type = SUITE_SPARSE;
+   options->preconditioner_type = IDENTITY;
+   if (kUserOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_SUITESPARSE
+
diff --git a/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_suitesparse_user_threads_test.cc b/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_suitesparse_user_threads_test.cc
new file mode 100644
index 0000000..b0d67d7
--- /dev/null
+++ b/internal/ceres/generated_bundle_adjustment_tests/ba_sparseschur_suitesparse_user_threads_test.cc
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_bundle_adjustment_tests.py.
+
+#include "bundle_adjustment_test_util.h"
+
+#ifndef CERES_NO_SUITESPARSE
+#ifndef CERES_NO_THREADS
+
+namespace ceres {
+namespace internal {
+
+TEST_F(BundleAdjustmentTest,
+       SparseSchur_SuiteSparse_UserOrdering_Threads) {  // NOLINT
+   BundleAdjustmentProblem bundle_adjustment_problem;
+   Solver::Options* options =
+     bundle_adjustment_problem.mutable_solver_options();
+   options->num_threads = 4;
+   options->linear_solver_type = SPARSE_SCHUR;
+   options->sparse_linear_algebra_library_type = SUITE_SPARSE;
+   options->preconditioner_type = IDENTITY;
+   if (kUserOrdering) {
+     options->linear_solver_ordering.reset();
+   }
+   Problem* problem = bundle_adjustment_problem.mutable_problem();
+   RunSolverForConfigAndExpectResidualsMatch(*options, problem);
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
+#endif  // CERES_NO_SUITESPARSE
+
diff --git a/internal/ceres/gmock/gmock.h b/internal/ceres/gmock/gmock.h
new file mode 100644
index 0000000..cd54177
--- /dev/null
+++ b/internal/ceres/gmock/gmock.h
@@ -0,0 +1,14978 @@
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Mock - a framework for writing C++ mock classes.
+//
+// This is the main header file a user should include.
+
+#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_H_
+#define GMOCK_INCLUDE_GMOCK_GMOCK_H_
+
+// This file implements the following syntax:
+//
+//   ON_CALL(mock_object.Method(...))
+//     .With(...) ?
+//     .WillByDefault(...);
+//
+// where With() is optional and WillByDefault() must appear exactly
+// once.
+//
+//   EXPECT_CALL(mock_object.Method(...))
+//     .With(...) ?
+//     .Times(...) ?
+//     .InSequence(...) *
+//     .WillOnce(...) *
+//     .WillRepeatedly(...) ?
+//     .RetiresOnSaturation() ? ;
+//
+// where all clauses are optional and WillOnce() can be repeated.
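+//
+// As a minimal illustration of the two macros (MockTurtle and GetX() are
+// hypothetical names used only for this sketch, not declared anywhere in
+// Google Mock):
+//
+//   using ::testing::AtLeast;
+//   using ::testing::Return;
+//
+//   MockTurtle turtle;
+//   ON_CALL(turtle, GetX())
+//       .WillByDefault(Return(0));
+//   EXPECT_CALL(turtle, GetX())
+//       .Times(AtLeast(1))
+//       .WillOnce(Return(100))
+//       .WillRepeatedly(Return(200));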
+
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Mock - a framework for writing C++ mock classes.
+//
+// This file implements some commonly used actions.
+
+#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_ACTIONS_H_
+#define GMOCK_INCLUDE_GMOCK_GMOCK_ACTIONS_H_
+
+#ifndef _WIN32_WCE
+# include <errno.h>
+#endif
+
+#include <algorithm>
+#include <string>
+
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Mock - a framework for writing C++ mock classes.
+//
+// This file defines some utilities useful for implementing Google
+// Mock.  They are subject to change without notice, so please DO NOT
+// USE THEM IN USER CODE.
+
+#ifndef GMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_INTERNAL_UTILS_H_
+#define GMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_INTERNAL_UTILS_H_
+
+#include <stdio.h>
+#include <ostream>  // NOLINT
+#include <string>
+
+// This file was GENERATED by command:
+//     pump.py gmock-generated-internal-utils.h.pump
+// DO NOT EDIT BY HAND!!!
+
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Mock - a framework for writing C++ mock classes.
+//
+// This file contains template meta-programming utility classes needed
+// for implementing Google Mock.
+
+#ifndef GMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_GENERATED_INTERNAL_UTILS_H_
+#define GMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_GENERATED_INTERNAL_UTILS_H_
+
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vadimb@google.com (Vadim Berman)
+//
+// Low-level types and utilities for porting Google Mock to various
+// platforms.  All macros ending with _ and symbols defined in an
+// internal namespace are subject to change without notice.  Code
+// outside Google Mock MUST NOT USE THEM DIRECTLY.  Macros that don't
+// end with _ are part of Google Mock's public API and can be used by
+// code outside Google Mock.
+
+#ifndef GMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_PORT_H_
+#define GMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_PORT_H_
+
+#include <assert.h>
+#include <stdlib.h>
+#include <iostream>
+
+// Most of the utilities needed for porting Google Mock are also
+// required for Google Test and are defined in gtest-port.h.
+//
+// Note to maintainers: to reduce code duplication, prefer adding
+// portability utilities to Google Test's gtest-port.h instead of
+// here, as Google Mock depends on Google Test.  Only add a utility
+// here if it's truly specific to Google Mock.
+#include "gtest/gtest.h"
+// Copyright 2015, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Injection point for custom user configurations.
+// The following macros can be defined:
+//
+//   Flag related macros:
+//     GMOCK_DECLARE_bool_(name)
+//     GMOCK_DECLARE_int32_(name)
+//     GMOCK_DECLARE_string_(name)
+//     GMOCK_DEFINE_bool_(name, default_val, doc)
+//     GMOCK_DEFINE_int32_(name, default_val, doc)
+//     GMOCK_DEFINE_string_(name, default_val, doc)
+//
+// ** Custom implementation starts here **
+
+#ifndef GMOCK_INCLUDE_GMOCK_INTERNAL_CUSTOM_GMOCK_PORT_H_
+#define GMOCK_INCLUDE_GMOCK_INTERNAL_CUSTOM_GMOCK_PORT_H_
+
+#endif  // GMOCK_INCLUDE_GMOCK_INTERNAL_CUSTOM_GMOCK_PORT_H_
+
+// To avoid conditional compilation everywhere, we make it
+// gmock-port.h's responsibility to #include the header implementing
+// tr1/tuple.  gmock-port.h does this via gtest-port.h, which is
+// guaranteed to pull in the tuple header.
+
+// For MS Visual C++, check the compiler version. At least VS 2003 is
+// required to compile Google Mock.
+#if defined(_MSC_VER) && _MSC_VER < 1310
+# error "At least Visual C++ 2003 (7.1) is required to compile Google Mock."
+#endif
+
+// Macro for referencing flags.  This is public as we want the user to
+// use this syntax to reference Google Mock flags.
+#define GMOCK_FLAG(name) FLAGS_gmock_##name
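+//
+// For example, GMOCK_FLAG(verbose) expands to FLAGS_gmock_verbose, the
+// variable behind the --gmock_verbose command line flag.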
+
+#if !defined(GMOCK_DECLARE_bool_)
+
+// Macros for declaring flags.
+#define GMOCK_DECLARE_bool_(name) extern GTEST_API_ bool GMOCK_FLAG(name)
+#define GMOCK_DECLARE_int32_(name) \
+    extern GTEST_API_ ::testing::internal::Int32 GMOCK_FLAG(name)
+#define GMOCK_DECLARE_string_(name) \
+    extern GTEST_API_ ::std::string GMOCK_FLAG(name)
+
+// Macros for defining flags.
+#define GMOCK_DEFINE_bool_(name, default_val, doc) \
+    GTEST_API_ bool GMOCK_FLAG(name) = (default_val)
+#define GMOCK_DEFINE_int32_(name, default_val, doc) \
+    GTEST_API_ ::testing::internal::Int32 GMOCK_FLAG(name) = (default_val)
+#define GMOCK_DEFINE_string_(name, default_val, doc) \
+    GTEST_API_ ::std::string GMOCK_FLAG(name) = (default_val)
+
+#endif  // !defined(GMOCK_DECLARE_bool_)
+
+#endif  // GMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_PORT_H_
+
+namespace testing {
+
+template <typename T>
+class Matcher;
+
+namespace internal {
+
+// An IgnoredValue object can be implicitly constructed from ANY value.
+// This is used in implementing the IgnoreResult(a) action.
+class IgnoredValue {
+ public:
+  // This constructor template allows any value to be implicitly
+  // converted to IgnoredValue.  The object has no data member and
+  // doesn't try to remember anything about the argument.  We
+  // deliberately omit the 'explicit' keyword in order to allow the
+  // conversion to be implicit.
+  template <typename T>
+  IgnoredValue(const T& /* ignored */) {}  // NOLINT(runtime/explicit)
+};
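+
+// As an illustration of how IgnoredValue is used: IgnoreResult(a) wraps an
+// action `a` whose return value should be discarded.  A hedged sketch,
+// assuming a mock object with a void-returning mocked method Process(int)
+// and a free function int DoComputation(int) (both hypothetical):
+//
+//   using ::testing::_;
+//   using ::testing::IgnoreResult;
+//   using ::testing::Invoke;
+//
+//   EXPECT_CALL(mock, Process(_))
+//       .WillOnce(IgnoreResult(Invoke(DoComputation)));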
+
+// MatcherTuple<T>::type is a tuple type where each field is a Matcher
+// for the corresponding field in tuple type T.
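+//
+// For example:
+//
+//   MatcherTuple< ::testing::tuple<int, const char*> >::type
+//
+// is
+//
+//   ::testing::tuple<Matcher<int>, Matcher<const char*> >.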
+template <typename Tuple>
+struct MatcherTuple;
+
+template <>
+struct MatcherTuple< ::testing::tuple<> > {
+  typedef ::testing::tuple< > type;
+};
+
+template <typename A1>
+struct MatcherTuple< ::testing::tuple<A1> > {
+  typedef ::testing::tuple<Matcher<A1> > type;
+};
+
+template <typename A1, typename A2>
+struct MatcherTuple< ::testing::tuple<A1, A2> > {
+  typedef ::testing::tuple<Matcher<A1>, Matcher<A2> > type;
+};
+
+template <typename A1, typename A2, typename A3>
+struct MatcherTuple< ::testing::tuple<A1, A2, A3> > {
+  typedef ::testing::tuple<Matcher<A1>, Matcher<A2>, Matcher<A3> > type;
+};
+
+template <typename A1, typename A2, typename A3, typename A4>
+struct MatcherTuple< ::testing::tuple<A1, A2, A3, A4> > {
+  typedef ::testing::tuple<Matcher<A1>, Matcher<A2>, Matcher<A3>,
+      Matcher<A4> > type;
+};
+
+template <typename A1, typename A2, typename A3, typename A4, typename A5>
+struct MatcherTuple< ::testing::tuple<A1, A2, A3, A4, A5> > {
+  typedef ::testing::tuple<Matcher<A1>, Matcher<A2>, Matcher<A3>, Matcher<A4>,
+      Matcher<A5> > type;
+};
+
+template <typename A1, typename A2, typename A3, typename A4, typename A5,
+    typename A6>
+struct MatcherTuple< ::testing::tuple<A1, A2, A3, A4, A5, A6> > {
+  typedef ::testing::tuple<Matcher<A1>, Matcher<A2>, Matcher<A3>, Matcher<A4>,
+      Matcher<A5>, Matcher<A6> > type;
+};
+
+template <typename A1, typename A2, typename A3, typename A4, typename A5,
+    typename A6, typename A7>
+struct MatcherTuple< ::testing::tuple<A1, A2, A3, A4, A5, A6, A7> > {
+  typedef ::testing::tuple<Matcher<A1>, Matcher<A2>, Matcher<A3>, Matcher<A4>,
+      Matcher<A5>, Matcher<A6>, Matcher<A7> > type;
+};
+
+template <typename A1, typename A2, typename A3, typename A4, typename A5,
+    typename A6, typename A7, typename A8>
+struct MatcherTuple< ::testing::tuple<A1, A2, A3, A4, A5, A6, A7, A8> > {
+  typedef ::testing::tuple<Matcher<A1>, Matcher<A2>, Matcher<A3>, Matcher<A4>,
+      Matcher<A5>, Matcher<A6>, Matcher<A7>, Matcher<A8> > type;
+};
+
+template <typename A1, typename A2, typename A3, typename A4, typename A5,
+    typename A6, typename A7, typename A8, typename A9>
+struct MatcherTuple< ::testing::tuple<A1, A2, A3, A4, A5, A6, A7, A8, A9> > {
+  typedef ::testing::tuple<Matcher<A1>, Matcher<A2>, Matcher<A3>, Matcher<A4>,
+      Matcher<A5>, Matcher<A6>, Matcher<A7>, Matcher<A8>, Matcher<A9> > type;
+};
+
+template <typename A1, typename A2, typename A3, typename A4, typename A5,
+    typename A6, typename A7, typename A8, typename A9, typename A10>
+struct MatcherTuple< ::testing::tuple<A1, A2, A3, A4, A5, A6, A7, A8, A9,
+    A10> > {
+  typedef ::testing::tuple<Matcher<A1>, Matcher<A2>, Matcher<A3>, Matcher<A4>,
+      Matcher<A5>, Matcher<A6>, Matcher<A7>, Matcher<A8>, Matcher<A9>,
+      Matcher<A10> > type;
+};
+
+// Template struct Function<F>, where F must be a function type, contains
+// the following typedefs:
+//
+//   Result:               the function's return type.
+//   ArgumentN:            the type of the N-th argument, where N starts with 1.
+//   ArgumentTuple:        the tuple type consisting of all parameters of F.
+//   ArgumentMatcherTuple: the tuple type consisting of Matchers for all
+//                         parameters of F.
+//   MakeResultVoid:       the function type obtained by substituting void
+//                         for the return type of F.
+//   MakeResultIgnoredValue:
+//                         the function type obtained by substituting
+//                         IgnoredValue for the return type of F.
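+//
+// For example, given F = int(bool, const std::string&) (illustrative only):
+//
+//   Function<F>::Result               is int,
+//   Function<F>::Argument1            is bool,
+//   Function<F>::Argument2            is const std::string&,
+//   Function<F>::ArgumentTuple        is
+//       ::testing::tuple<bool, const std::string&>,
+//   Function<F>::ArgumentMatcherTuple is
+//       ::testing::tuple<Matcher<bool>, Matcher<const std::string&> >,
+//   Function<F>::MakeResultVoid       is void(bool, const std::string&), and
+//   Function<F>::MakeResultIgnoredValue is
+//       IgnoredValue(bool, const std::string&).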
+template <typename F>
+struct Function;
+
+template <typename R>
+struct Function<R()> {
+  typedef R Result;
+  typedef ::testing::tuple<> ArgumentTuple;
+  typedef typename MatcherTuple<ArgumentTuple>::type ArgumentMatcherTuple;
+  typedef void MakeResultVoid();
+  typedef IgnoredValue MakeResultIgnoredValue();
+};
+
+template <typename R, typename A1>
+struct Function<R(A1)>
+    : Function<R()> {
+  typedef A1 Argument1;
+  typedef ::testing::tuple<A1> ArgumentTuple;
+  typedef typename MatcherTuple<ArgumentTuple>::type ArgumentMatcherTuple;
+  typedef void MakeResultVoid(A1);
+  typedef IgnoredValue MakeResultIgnoredValue(A1);
+};
+
+template <typename R, typename A1, typename A2>
+struct Function<R(A1, A2)>
+    : Function<R(A1)> {
+  typedef A2 Argument2;
+  typedef ::testing::tuple<A1, A2> ArgumentTuple;
+  typedef typename MatcherTuple<ArgumentTuple>::type ArgumentMatcherTuple;
+  typedef void MakeResultVoid(A1, A2);
+  typedef IgnoredValue MakeResultIgnoredValue(A1, A2);
+};
+
+template <typename R, typename A1, typename A2, typename A3>
+struct Function<R(A1, A2, A3)>
+    : Function<R(A1, A2)> {
+  typedef A3 Argument3;
+  typedef ::testing::tuple<A1, A2, A3> ArgumentTuple;
+  typedef typename MatcherTuple<ArgumentTuple>::type ArgumentMatcherTuple;
+  typedef void MakeResultVoid(A1, A2, A3);
+  typedef IgnoredValue MakeResultIgnoredValue(A1, A2, A3);
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4>
+struct Function<R(A1, A2, A3, A4)>
+    : Function<R(A1, A2, A3)> {
+  typedef A4 Argument4;
+  typedef ::testing::tuple<A1, A2, A3, A4> ArgumentTuple;
+  typedef typename MatcherTuple<ArgumentTuple>::type ArgumentMatcherTuple;
+  typedef void MakeResultVoid(A1, A2, A3, A4);
+  typedef IgnoredValue MakeResultIgnoredValue(A1, A2, A3, A4);
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5>
+struct Function<R(A1, A2, A3, A4, A5)>
+    : Function<R(A1, A2, A3, A4)> {
+  typedef A5 Argument5;
+  typedef ::testing::tuple<A1, A2, A3, A4, A5> ArgumentTuple;
+  typedef typename MatcherTuple<ArgumentTuple>::type ArgumentMatcherTuple;
+  typedef void MakeResultVoid(A1, A2, A3, A4, A5);
+  typedef IgnoredValue MakeResultIgnoredValue(A1, A2, A3, A4, A5);
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5, typename A6>
+struct Function<R(A1, A2, A3, A4, A5, A6)>
+    : Function<R(A1, A2, A3, A4, A5)> {
+  typedef A6 Argument6;
+  typedef ::testing::tuple<A1, A2, A3, A4, A5, A6> ArgumentTuple;
+  typedef typename MatcherTuple<ArgumentTuple>::type ArgumentMatcherTuple;
+  typedef void MakeResultVoid(A1, A2, A3, A4, A5, A6);
+  typedef IgnoredValue MakeResultIgnoredValue(A1, A2, A3, A4, A5, A6);
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5, typename A6, typename A7>
+struct Function<R(A1, A2, A3, A4, A5, A6, A7)>
+    : Function<R(A1, A2, A3, A4, A5, A6)> {
+  typedef A7 Argument7;
+  typedef ::testing::tuple<A1, A2, A3, A4, A5, A6, A7> ArgumentTuple;
+  typedef typename MatcherTuple<ArgumentTuple>::type ArgumentMatcherTuple;
+  typedef void MakeResultVoid(A1, A2, A3, A4, A5, A6, A7);
+  typedef IgnoredValue MakeResultIgnoredValue(A1, A2, A3, A4, A5, A6, A7);
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5, typename A6, typename A7, typename A8>
+struct Function<R(A1, A2, A3, A4, A5, A6, A7, A8)>
+    : Function<R(A1, A2, A3, A4, A5, A6, A7)> {
+  typedef A8 Argument8;
+  typedef ::testing::tuple<A1, A2, A3, A4, A5, A6, A7, A8> ArgumentTuple;
+  typedef typename MatcherTuple<ArgumentTuple>::type ArgumentMatcherTuple;
+  typedef void MakeResultVoid(A1, A2, A3, A4, A5, A6, A7, A8);
+  typedef IgnoredValue MakeResultIgnoredValue(A1, A2, A3, A4, A5, A6, A7, A8);
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5, typename A6, typename A7, typename A8, typename A9>
+struct Function<R(A1, A2, A3, A4, A5, A6, A7, A8, A9)>
+    : Function<R(A1, A2, A3, A4, A5, A6, A7, A8)> {
+  typedef A9 Argument9;
+  typedef ::testing::tuple<A1, A2, A3, A4, A5, A6, A7, A8, A9> ArgumentTuple;
+  typedef typename MatcherTuple<ArgumentTuple>::type ArgumentMatcherTuple;
+  typedef void MakeResultVoid(A1, A2, A3, A4, A5, A6, A7, A8, A9);
+  typedef IgnoredValue MakeResultIgnoredValue(A1, A2, A3, A4, A5, A6, A7, A8,
+      A9);
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5, typename A6, typename A7, typename A8, typename A9,
+    typename A10>
+struct Function<R(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10)>
+    : Function<R(A1, A2, A3, A4, A5, A6, A7, A8, A9)> {
+  typedef A10 Argument10;
+  typedef ::testing::tuple<A1, A2, A3, A4, A5, A6, A7, A8, A9,
+      A10> ArgumentTuple;
+  typedef typename MatcherTuple<ArgumentTuple>::type ArgumentMatcherTuple;
+  typedef void MakeResultVoid(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10);
+  typedef IgnoredValue MakeResultIgnoredValue(A1, A2, A3, A4, A5, A6, A7, A8,
+      A9, A10);
+};
+
+}  // namespace internal
+
+}  // namespace testing
+
+#endif  // GMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_GENERATED_INTERNAL_UTILS_H_
+
+namespace testing {
+namespace internal {
+
+// Converts an identifier name to a space-separated list of lower-case
+// words.  Each maximum substring of the form [A-Za-z][a-z]*|\d+ is
+// treated as one word.  For example, both "FooBar123" and
+// "foo_bar_123" are converted to "foo bar 123".
+GTEST_API_ string ConvertIdentifierNameToWords(const char* id_name);
+
+// PointeeOf<Pointer>::type is the type of a value pointed to by a
+// Pointer, which can be either a smart pointer or a raw pointer.  The
+// following default implementation is for the case where Pointer is a
+// smart pointer.
+template <typename Pointer>
+struct PointeeOf {
+  // Smart pointer classes define type element_type as the type of
+  // their pointees.
+  typedef typename Pointer::element_type type;
+};
+// This specialization is for the raw pointer case.
+template <typename T>
+struct PointeeOf<T*> { typedef T type; };  // NOLINT
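+//
+// For example:
+//
+//   PointeeOf< ::testing::internal::linked_ptr<int> >::type  is int, and
+//   PointeeOf<const char*>::type                             is const char.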
+
+// GetRawPointer(p) returns the raw pointer underlying p when p is a
+// smart pointer, or returns p itself when p is already a raw pointer.
+// The following default implementation is for the smart pointer case.
+template <typename Pointer>
+inline const typename Pointer::element_type* GetRawPointer(const Pointer& p) {
+  return p.get();
+}
+// This overloaded version is for the raw pointer case.
+template <typename Element>
+inline Element* GetRawPointer(Element* p) { return p; }
+
+// This comparator allows linked_ptr to be stored in sets.
+template <typename T>
+struct LinkedPtrLessThan {
+  bool operator()(const ::testing::internal::linked_ptr<T>& lhs,
+                  const ::testing::internal::linked_ptr<T>& rhs) const {
+    return lhs.get() < rhs.get();
+  }
+};
+
+// Symbian compilation can be done with wchar_t being either a native
+// type or a typedef.  Using Google Mock with OpenC without wchar_t
+// should require the definition of _STLP_NO_WCHAR_T.
+//
+// MSVC treats wchar_t as a native type usually, but treats it as the
+// same as unsigned short when the compiler option /Zc:wchar_t- is
+// specified.  It defines _NATIVE_WCHAR_T_DEFINED symbol when wchar_t
+// is a native type.
+#if (GTEST_OS_SYMBIAN && defined(_STLP_NO_WCHAR_T)) || \
+    (defined(_MSC_VER) && !defined(_NATIVE_WCHAR_T_DEFINED))
+// wchar_t is a typedef.
+#else
+# define GMOCK_WCHAR_T_IS_NATIVE_ 1
+#endif
+
+// signed wchar_t and unsigned wchar_t are NOT in the C++ standard.
+// Using them is a bad practice and not portable.  So DON'T use them.
+//
+// Still, Google Mock is designed to work even if the user uses signed
+// wchar_t or unsigned wchar_t (obviously, assuming the compiler
+// supports them).
+//
+// To gcc,
+//   wchar_t == signed wchar_t != unsigned wchar_t == unsigned int
+#ifdef __GNUC__
+// signed/unsigned wchar_t are valid types.
+# define GMOCK_HAS_SIGNED_WCHAR_T_ 1
+#endif
+
+// In what follows, we use the term "kind" to indicate whether a type
+// is bool, an integer type (excluding bool), a floating-point type,
+// or none of them.  This categorization is useful for determining
+// when a matcher argument type can be safely converted to another
+// type in the implementation of SafeMatcherCast.
+enum TypeKind {
+  kBool, kInteger, kFloatingPoint, kOther
+};
+
+// KindOf<T>::value is the kind of type T.
+template <typename T> struct KindOf {
+  enum { value = kOther };  // The default kind.
+};
+
+// This macro declares that the kind of 'type' is 'kind'.
+#define GMOCK_DECLARE_KIND_(type, kind) \
+  template <> struct KindOf<type> { enum { value = kind }; }
+
+GMOCK_DECLARE_KIND_(bool, kBool);
+
+// All standard integer types.
+GMOCK_DECLARE_KIND_(char, kInteger);
+GMOCK_DECLARE_KIND_(signed char, kInteger);
+GMOCK_DECLARE_KIND_(unsigned char, kInteger);
+GMOCK_DECLARE_KIND_(short, kInteger);  // NOLINT
+GMOCK_DECLARE_KIND_(unsigned short, kInteger);  // NOLINT
+GMOCK_DECLARE_KIND_(int, kInteger);
+GMOCK_DECLARE_KIND_(unsigned int, kInteger);
+GMOCK_DECLARE_KIND_(long, kInteger);  // NOLINT
+GMOCK_DECLARE_KIND_(unsigned long, kInteger);  // NOLINT
+
+#if GMOCK_WCHAR_T_IS_NATIVE_
+GMOCK_DECLARE_KIND_(wchar_t, kInteger);
+#endif
+
+// Non-standard integer types.
+GMOCK_DECLARE_KIND_(Int64, kInteger);
+GMOCK_DECLARE_KIND_(UInt64, kInteger);
+
+// All standard floating-point types.
+GMOCK_DECLARE_KIND_(float, kFloatingPoint);
+GMOCK_DECLARE_KIND_(double, kFloatingPoint);
+GMOCK_DECLARE_KIND_(long double, kFloatingPoint);
+
+#undef GMOCK_DECLARE_KIND_
+
+// Evaluates to the kind of 'type'.
+#define GMOCK_KIND_OF_(type) \
+  static_cast< ::testing::internal::TypeKind>( \
+      ::testing::internal::KindOf<type>::value)
+
+// Evaluates to true iff integer type T is signed.
+#define GMOCK_IS_SIGNED_(T) (static_cast<T>(-1) < 0)
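+
+// For example, GMOCK_KIND_OF_(int) is kInteger, GMOCK_KIND_OF_(double) is
+// kFloatingPoint, and GMOCK_KIND_OF_(void*) is kOther (no kind is declared
+// for pointer types).  Similarly, GMOCK_IS_SIGNED_(int) is true while
+// GMOCK_IS_SIGNED_(unsigned char) is false.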
+
+// LosslessArithmeticConvertibleImpl<kFromKind, From, kToKind, To>::value
+// is true iff arithmetic type From can be losslessly converted to
+// arithmetic type To.
+//
+// It's the user's responsibility to ensure that both From and To are
+// raw (i.e. have no CV modifiers, are not pointers, and are not
+// references) built-in arithmetic types, kFromKind is the kind of
+// From, and kToKind is the kind of To; the value is
+// implementation-defined when the above pre-condition is violated.
+template <TypeKind kFromKind, typename From, TypeKind kToKind, typename To>
+struct LosslessArithmeticConvertibleImpl : public false_type {};
+
+// Converting bool to bool is lossless.
+template <>
+struct LosslessArithmeticConvertibleImpl<kBool, bool, kBool, bool>
+    : public true_type {};  // NOLINT
+
+// Converting bool to any integer type is lossless.
+template <typename To>
+struct LosslessArithmeticConvertibleImpl<kBool, bool, kInteger, To>
+    : public true_type {};  // NOLINT
+
+// Converting bool to any floating-point type is lossless.
+template <typename To>
+struct LosslessArithmeticConvertibleImpl<kBool, bool, kFloatingPoint, To>
+    : public true_type {};  // NOLINT
+
+// Converting an integer to bool is lossy.
+template <typename From>
+struct LosslessArithmeticConvertibleImpl<kInteger, From, kBool, bool>
+    : public false_type {};  // NOLINT
+
+// Converting an integer to another non-bool integer is lossless iff
+// the target type's range encloses the source type's range.
+template <typename From, typename To>
+struct LosslessArithmeticConvertibleImpl<kInteger, From, kInteger, To>
+    : public bool_constant<
+      // When converting from a smaller size to a larger size, we are
+      // fine as long as we are not converting from signed to unsigned.
+      ((sizeof(From) < sizeof(To)) &&
+       (!GMOCK_IS_SIGNED_(From) || GMOCK_IS_SIGNED_(To))) ||
+      // When converting between the same size, the signedness must match.
+      ((sizeof(From) == sizeof(To)) &&
+       (GMOCK_IS_SIGNED_(From) == GMOCK_IS_SIGNED_(To)))> {};  // NOLINT
+
+#undef GMOCK_IS_SIGNED_
+
+// Converting an integer to a floating-point type may be lossy, since
+// the format of a floating-point number is implementation-defined.
+template <typename From, typename To>
+struct LosslessArithmeticConvertibleImpl<kInteger, From, kFloatingPoint, To>
+    : public false_type {};  // NOLINT
+
+// Converting a floating-point to bool is lossy.
+template <typename From>
+struct LosslessArithmeticConvertibleImpl<kFloatingPoint, From, kBool, bool>
+    : public false_type {};  // NOLINT
+
+// Converting a floating-point to an integer is lossy.
+template <typename From, typename To>
+struct LosslessArithmeticConvertibleImpl<kFloatingPoint, From, kInteger, To>
+    : public false_type {};  // NOLINT
+
+// Converting a floating-point to another floating-point is lossless
+// iff the target type is at least as big as the source type.
+template <typename From, typename To>
+struct LosslessArithmeticConvertibleImpl<
+  kFloatingPoint, From, kFloatingPoint, To>
+    : public bool_constant<sizeof(From) <= sizeof(To)> {};  // NOLINT
+
+// LosslessArithmeticConvertible<From, To>::value is true iff arithmetic
+// type From can be losslessly converted to arithmetic type To.
+//
+// It's the user's responsibility to ensure that both From and To are
+// raw (i.e. have no CV modifiers, are not pointers, and are not
+// references) built-in arithmetic types; the value is
+// implementation-defined when the above pre-condition is violated.
+template <typename From, typename To>
+struct LosslessArithmeticConvertible
+    : public LosslessArithmeticConvertibleImpl<
+  GMOCK_KIND_OF_(From), From, GMOCK_KIND_OF_(To), To> {};  // NOLINT
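+
+// As an illustration (comment-only sketch), the rules above give:
+//
+//   LosslessArithmeticConvertible<bool, long>::value        is true,
+//   LosslessArithmeticConvertible<float, double>::value     is true,
+//   LosslessArithmeticConvertible<int, unsigned int>::value is false
+//       (same size, different signedness), and
+//   LosslessArithmeticConvertible<long, float>::value       is false
+//       (integer-to-floating-point is always treated as lossy here).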
+
+// This interface knows how to report a Google Mock failure (either
+// non-fatal or fatal).
+class FailureReporterInterface {
+ public:
+  // The type of a failure (either non-fatal or fatal).
+  enum FailureType {
+    kNonfatal, kFatal
+  };
+
+  virtual ~FailureReporterInterface() {}
+
+  // Reports a failure that occurred at the given source file location.
+  virtual void ReportFailure(FailureType type, const char* file, int line,
+                             const string& message) = 0;
+};
+
+// Returns the failure reporter used by Google Mock.
+GTEST_API_ FailureReporterInterface* GetFailureReporter();
+
+// Asserts that condition is true; aborts the process with the given
+// message if condition is false.  We cannot use LOG(FATAL) or CHECK()
+// as Google Mock might be used to mock the log sink itself.  We
+// inline this function to prevent it from showing up in the stack
+// trace.
+inline void Assert(bool condition, const char* file, int line,
+                   const string& msg) {
+  if (!condition) {
+    GetFailureReporter()->ReportFailure(FailureReporterInterface::kFatal,
+                                        file, line, msg);
+  }
+}
+inline void Assert(bool condition, const char* file, int line) {
+  Assert(condition, file, line, "Assertion failed.");
+}
+
+// Verifies that condition is true; generates a non-fatal failure if
+// condition is false.
+inline void Expect(bool condition, const char* file, int line,
+                   const string& msg) {
+  if (!condition) {
+    GetFailureReporter()->ReportFailure(FailureReporterInterface::kNonfatal,
+                                        file, line, msg);
+  }
+}
+inline void Expect(bool condition, const char* file, int line) {
+  Expect(condition, file, line, "Expectation failed.");
+}
+
+// Severity level of a log.
+enum LogSeverity {
+  kInfo = 0,
+  kWarning = 1
+};
+
+// Valid values for the --gmock_verbose flag.
+
+// All logs (informational and warnings) are printed.
+const char kInfoVerbosity[] = "info";
+// Only warnings are printed.
+const char kWarningVerbosity[] = "warning";
+// No logs are printed.
+const char kErrorVerbosity[] = "error";
+
+// Returns true iff a log with the given severity is visible according
+// to the --gmock_verbose flag.
+GTEST_API_ bool LogIsVisible(LogSeverity severity);
+
+// Prints the given message to stdout iff 'severity' >= the level
+// specified by the --gmock_verbose flag.  If stack_frames_to_skip >=
+// 0, also prints the stack trace excluding the top
+// stack_frames_to_skip frames.  In opt mode, any positive
+// stack_frames_to_skip is treated as 0, since we don't know which
+// function calls will be inlined by the compiler and need to be
+// conservative.
+GTEST_API_ void Log(LogSeverity severity,
+                    const string& message,
+                    int stack_frames_to_skip);
+
+// TODO(wan@google.com): group all type utilities together.
+
+// Type traits.
+
+// is_reference<T>::value is non-zero iff T is a reference type.
+template <typename T> struct is_reference : public false_type {};
+template <typename T> struct is_reference<T&> : public true_type {};
+
+// type_equals<T1, T2>::value is non-zero iff T1 and T2 are the same type.
+template <typename T1, typename T2> struct type_equals : public false_type {};
+template <typename T> struct type_equals<T, T> : public true_type {};
+
+// remove_reference<T>::type removes the reference from type T, if any.
+template <typename T> struct remove_reference { typedef T type; };  // NOLINT
+template <typename T> struct remove_reference<T&> { typedef T type; }; // NOLINT
+
+// DecayArray<T>::type turns an array type U[N] to const U* and preserves
+// other types.  Useful for saving a copy of a function argument.
+template <typename T> struct DecayArray { typedef T type; };  // NOLINT
+template <typename T, size_t N> struct DecayArray<T[N]> {
+  typedef const T* type;
+};
+// Sometimes people use arrays whose size is not available at the use site
+// (e.g. extern const char kNamePrefix[]).  This specialization covers that
+// case.
+template <typename T> struct DecayArray<T[]> {
+  typedef const T* type;
+};
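+
+// For example (illustrative only):
+//
+//   DecayArray<int[5]>::type  is const int*,
+//   DecayArray<char[]>::type  is const char*, and
+//   DecayArray<double>::type  is double (non-array types are preserved).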
+
+// Disable MSVC warnings for infinite recursion, since in this case
+// the recursion is unreachable.
+#ifdef _MSC_VER
+# pragma warning(push)
+# pragma warning(disable:4717)
+#endif
+
+// Invalid<T>() is usable as an expression of type T, but will terminate
+// the program with an assertion failure if actually run.  This is useful
+// when a value of type T is needed for compilation, but the statement
+// will not really be executed (or we don't care if the statement
+// crashes).
+template <typename T>
+inline T Invalid() {
+  Assert(false, "", -1, "Internal error: attempt to return invalid value");
+  // This statement is unreachable, and would never terminate even if it
+  // could be reached. It is provided only to placate compiler warnings
+  // about missing return statements.
+  return Invalid<T>();
+}
+
+#ifdef _MSC_VER
+# pragma warning(pop)
+#endif
+
+// Given a raw type (i.e. having no top-level reference or const
+// modifier) RawContainer that's either an STL-style container or a
+// native array, class StlContainerView<RawContainer> has the
+// following members:
+//
+//   - type is a type that provides an STL-style container view to
+//     (i.e. implements the STL container concept for) RawContainer;
+//   - const_reference is a type that provides a reference to a const
+//     RawContainer;
+//   - ConstReference(raw_container) returns a const reference to an STL-style
+//     container view to raw_container, which is a RawContainer.
+//   - Copy(raw_container) returns an STL-style container view of a
+//     copy of raw_container, which is a RawContainer.
+//
+// This generic version is used when RawContainer itself is already an
+// STL-style container.
+template <class RawContainer>
+class StlContainerView {
+ public:
+  typedef RawContainer type;
+  typedef const type& const_reference;
+
+  static const_reference ConstReference(const RawContainer& container) {
+    // Ensures that RawContainer is not a const type.
+    testing::StaticAssertTypeEq<RawContainer,
+        GTEST_REMOVE_CONST_(RawContainer)>();
+    return container;
+  }
+  static type Copy(const RawContainer& container) { return container; }
+};
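+
+// As an illustrative sketch, for a standard container the view is the
+// identity:
+//
+//   StlContainerView<std::vector<int> >::type is std::vector<int>, and
+//   ConstReference(v) simply returns v itself.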
+
+// This specialization is used when RawContainer is a native array type.
+template <typename Element, size_t N>
+class StlContainerView<Element[N]> {
+ public:
+  typedef GTEST_REMOVE_CONST_(Element) RawElement;
+  typedef internal::NativeArray<RawElement> type;
+  // NativeArray<T> can represent a native array either by value or by
+  // reference (selected by a constructor argument), so 'const type'
+  // can be used to reference a const native array.  We cannot
+  // 'typedef const type& const_reference' here, as that would mean
+  // ConstReference() has to return a reference to a local variable.
+  typedef const type const_reference;
+
+  static const_reference ConstReference(const Element (&array)[N]) {
+    // Ensures that Element is not a const type.
+    testing::StaticAssertTypeEq<Element, RawElement>();
+#if GTEST_OS_SYMBIAN
+    // The Nokia Symbian compiler confuses itself in template instantiation
+    // for this call without the cast to Element*:
+    // function call '[testing::internal::NativeArray<char *>].NativeArray(
+    //     {lval} const char *[4], long, testing::internal::RelationToSource)'
+    //     does not match
+    // 'testing::internal::NativeArray<char *>::NativeArray(
+    //     char *const *, unsigned int, testing::internal::RelationToSource)'
+    // (instantiating: 'testing::internal::ContainsMatcherImpl
+    //     <const char * (&)[4]>::Matches(const char * (&)[4]) const')
+    // (instantiating: 'testing::internal::StlContainerView<char *[4]>::
+    //     ConstReference(const char * (&)[4])')
+    // (and though the N parameter type is also mismatched, an explicit
+    // conversion of it doesn't help - only the conversion of the array does).
+    return type(const_cast<Element*>(&array[0]), N,
+                RelationToSourceReference());
+#else
+    return type(array, N, RelationToSourceReference());
+#endif  // GTEST_OS_SYMBIAN
+  }
+  static type Copy(const Element (&array)[N]) {
+#if GTEST_OS_SYMBIAN
+    return type(const_cast<Element*>(&array[0]), N, RelationToSourceCopy());
+#else
+    return type(array, N, RelationToSourceCopy());
+#endif  // GTEST_OS_SYMBIAN
+  }
+};
+
+// This specialization is used when RawContainer is a native array
+// represented as a (pointer, size) tuple.
+template <typename ElementPointer, typename Size>
+class StlContainerView< ::testing::tuple<ElementPointer, Size> > {
+ public:
+  typedef GTEST_REMOVE_CONST_(
+      typename internal::PointeeOf<ElementPointer>::type) RawElement;
+  typedef internal::NativeArray<RawElement> type;
+  typedef const type const_reference;
+
+  static const_reference ConstReference(
+      const ::testing::tuple<ElementPointer, Size>& array) {
+    return type(get<0>(array), get<1>(array), RelationToSourceReference());
+  }
+  static type Copy(const ::testing::tuple<ElementPointer, Size>& array) {
+    return type(get<0>(array), get<1>(array), RelationToSourceCopy());
+  }
+};
+
+// The following specialization prevents the user from instantiating
+// StlContainer with a reference type.
+template <typename T> class StlContainerView<T&>;
+
+// A type transform to remove constness from the first part of a pair.
+// Pairs like that are used as the value_type of associative containers,
+// and this transform produces a similar but assignable pair.
+template <typename T>
+struct RemoveConstFromKey {
+  typedef T type;
+};
+
+// Partially specialized to remove constness from std::pair<const K, V>.
+template <typename K, typename V>
+struct RemoveConstFromKey<std::pair<const K, V> > {
+  typedef std::pair<K, V> type;
+};
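+
+// For example (illustrative only):
+//
+//   RemoveConstFromKey<std::pair<const int, std::string> >::type
+//       is std::pair<int, std::string>, while
+//   RemoveConstFromKey<double>::type is just double.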
+
+// Mapping from booleans to types. Similar to boost::bool_<kValue> and
+// std::integral_constant<bool, kValue>.
+template <bool kValue>
+struct BooleanConstant {};
+
+}  // namespace internal
+}  // namespace testing
+
+#endif  // GMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_INTERNAL_UTILS_H_
+
+
+#if GTEST_HAS_STD_TYPE_TRAITS_  // Defined by gtest-port.h via gmock-port.h.
+#include <type_traits>
+#endif
+
+namespace testing {
+
+// To implement an action Foo, define:
+//   1. a class FooAction that implements the ActionInterface interface, and
+//   2. a factory function that creates an Action object from a
+//      const FooAction*.
+//
+// The two-level delegation design follows that of Matcher, providing
+// consistency for extension developers.  It also eases ownership
+// management as Action objects can now be copied like plain values.
+
+namespace internal {
+
+template <typename F1, typename F2>
+class ActionAdaptor;
+
+// BuiltInDefaultValueGetter<T, true>::Get() returns a
+// default-constructed T value.  BuiltInDefaultValueGetter<T,
+// false>::Get() crashes with an error.
+//
+// This primary template is used when kDefaultConstructible is true.
+template <typename T, bool kDefaultConstructible>
+struct BuiltInDefaultValueGetter {
+  static T Get() { return T(); }
+};
+template <typename T>
+struct BuiltInDefaultValueGetter<T, false> {
+  static T Get() {
+    Assert(false, __FILE__, __LINE__,
+           "Default action undefined for the function return type.");
+    return internal::Invalid<T>();
+    // The above statement will never be reached, but is required in
+    // order for this function to compile.
+  }
+};
+
+// BuiltInDefaultValue<T>::Get() returns the "built-in" default value
+// for type T, which is NULL when T is a raw pointer type, 0 when T is
+// a numeric type, false when T is bool, or "" when T is string or
+// std::string.  In addition, in C++11 and above, it returns a
+// default-constructed T value if T is default constructible.  For any
+// other type T, the built-in default T value is undefined, and the
+// function will abort the process.
+template <typename T>
+class BuiltInDefaultValue {
+ public:
+#if GTEST_HAS_STD_TYPE_TRAITS_
+  // This function returns true iff type T has a built-in default value.
+  static bool Exists() {
+    return ::std::is_default_constructible<T>::value;
+  }
+
+  static T Get() {
+    return BuiltInDefaultValueGetter<
+        T, ::std::is_default_constructible<T>::value>::Get();
+  }
+
+#else  // GTEST_HAS_STD_TYPE_TRAITS_
+  // This function returns true iff type T has a built-in default value.
+  static bool Exists() {
+    return false;
+  }
+
+  static T Get() {
+    return BuiltInDefaultValueGetter<T, false>::Get();
+  }
+
+#endif  // GTEST_HAS_STD_TYPE_TRAITS_
+};
+
+// This partial specialization says that we use the same built-in
+// default value for T and const T.
+template <typename T>
+class BuiltInDefaultValue<const T> {
+ public:
+  static bool Exists() { return BuiltInDefaultValue<T>::Exists(); }
+  static T Get() { return BuiltInDefaultValue<T>::Get(); }
+};
+
+// This partial specialization defines the default values for pointer
+// types.
+template <typename T>
+class BuiltInDefaultValue<T*> {
+ public:
+  static bool Exists() { return true; }
+  static T* Get() { return NULL; }
+};
+
+// The following specializations define the default values for
+// specific types we care about.
+#define GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(type, value) \
+  template <> \
+  class BuiltInDefaultValue<type> { \
+   public: \
+    static bool Exists() { return true; } \
+    static type Get() { return value; } \
+  }
+
+GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(void, );  // NOLINT
+#if GTEST_HAS_GLOBAL_STRING
+GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(::string, "");
+#endif  // GTEST_HAS_GLOBAL_STRING
+GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(::std::string, "");
+GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(bool, false);
+GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(unsigned char, '\0');
+GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(signed char, '\0');
+GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(char, '\0');
+
+// There's no need for a default action for signed wchar_t, as that
+// type is the same as wchar_t for gcc, and invalid for MSVC.
+//
+// There's also no need for a default action for unsigned wchar_t, as
+// that type is the same as unsigned int for gcc, and invalid for
+// MSVC.
+#if GMOCK_WCHAR_T_IS_NATIVE_
+GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(wchar_t, 0U);  // NOLINT
+#endif
+
+GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(unsigned short, 0U);  // NOLINT
+GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(signed short, 0);     // NOLINT
+GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(unsigned int, 0U);
+GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(signed int, 0);
+GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(unsigned long, 0UL);  // NOLINT
+GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(signed long, 0L);     // NOLINT
+GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(UInt64, 0);
+GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(Int64, 0);
+GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(float, 0);
+GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(double, 0);
+
+#undef GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_
+
+}  // namespace internal
+
+// When an unexpected function call is encountered, Google Mock will
+// let it return a default value if the user has specified one for its
+// return type, or if the return type has a built-in default value;
+// otherwise Google Mock won't know what value to return and will have
+// to abort the process.
+//
+// The DefaultValue<T> class allows a user to specify the
+// default value for a type T that is both copyable and publicly
+// destructible (i.e. anything that can be used as a function return
+// type).  The usage is:
+//
+//   // Sets the default value for type T to be foo.
+//   DefaultValue<T>::Set(foo);
+template <typename T>
+class DefaultValue {
+ public:
+  // Sets the default value for type T; requires T to be
+  // copy-constructible and have a public destructor.
+  static void Set(T x) {
+    delete producer_;
+    producer_ = new FixedValueProducer(x);
+  }
+
+  // Provides a factory function to be called to generate the default value.
+  // This method can be used even if T is only move-constructible, but it is not
+  // limited to that case.
+  typedef T (*FactoryFunction)();
+  static void SetFactory(FactoryFunction factory) {
+    delete producer_;
+    producer_ = new FactoryValueProducer(factory);
+  }
+
+  // Unsets the default value for type T.
+  static void Clear() {
+    delete producer_;
+    producer_ = NULL;
+  }
+
+  // Returns true iff the user has set the default value for type T.
+  static bool IsSet() { return producer_ != NULL; }
+
+  // Returns true if T has a default return value set by the user or there
+  // exists a built-in default value.
+  static bool Exists() {
+    return IsSet() || internal::BuiltInDefaultValue<T>::Exists();
+  }
+
+  // Returns the default value for type T if the user has set one;
+  // otherwise returns the built-in default value. Requires that Exists()
+  // is true, which ensures that the return value is well-defined.
+  static T Get() {
+    return producer_ == NULL ?
+        internal::BuiltInDefaultValue<T>::Get() : producer_->Produce();
+  }
+
+ private:
+  class ValueProducer {
+   public:
+    virtual ~ValueProducer() {}
+    virtual T Produce() = 0;
+  };
+
+  class FixedValueProducer : public ValueProducer {
+   public:
+    explicit FixedValueProducer(T value) : value_(value) {}
+    virtual T Produce() { return value_; }
+
+   private:
+    const T value_;
+    GTEST_DISALLOW_COPY_AND_ASSIGN_(FixedValueProducer);
+  };
+
+  class FactoryValueProducer : public ValueProducer {
+   public:
+    explicit FactoryValueProducer(FactoryFunction factory)
+        : factory_(factory) {}
+    virtual T Produce() { return factory_(); }
+
+   private:
+    const FactoryFunction factory_;
+    GTEST_DISALLOW_COPY_AND_ASSIGN_(FactoryValueProducer);
+  };
+
+  static ValueProducer* producer_;
+};
+
+// This partial specialization allows a user to set default values for
+// reference types.
+template <typename T>
+class DefaultValue<T&> {
+ public:
+  // Sets the default value for type T&.
+  static void Set(T& x) {  // NOLINT
+    address_ = &x;
+  }
+
+  // Unsets the default value for type T&.
+  static void Clear() {
+    address_ = NULL;
+  }
+
+  // Returns true iff the user has set the default value for type T&.
+  static bool IsSet() { return address_ != NULL; }
+
+  // Returns true if T has a default return value set by the user or there
+  // exists a built-in default value.
+  static bool Exists() {
+    return IsSet() || internal::BuiltInDefaultValue<T&>::Exists();
+  }
+
+  // Returns the default value for type T& if the user has set one;
+  // otherwise returns the built-in default value if there is one;
+  // otherwise aborts the process.
+  static T& Get() {
+    return address_ == NULL ?
+        internal::BuiltInDefaultValue<T&>::Get() : *address_;
+  }
+
+ private:
+  static T* address_;
+};
+
+// This specialization allows DefaultValue<void>::Get() to
+// compile.
+template <>
+class DefaultValue<void> {
+ public:
+  static bool Exists() { return true; }
+  static void Get() {}
+};
+
+// Points to the user-set default value for type T.
+template <typename T>
+typename DefaultValue<T>::ValueProducer* DefaultValue<T>::producer_ = NULL;
+
+// Points to the user-set default value for type T&.
+template <typename T>
+T* DefaultValue<T&>::address_ = NULL;
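+
+// A minimal usage sketch (MyType and kSentinel are hypothetical names used
+// only for illustration):
+//
+//   DefaultValue<MyType>::Set(kSentinel);  // Unexpected calls returning
+//                                          // MyType now yield kSentinel.
+//   ...                                    // Run the code under test.
+//   DefaultValue<MyType>::Clear();         // Restore the built-in default.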
+
+// Implement this interface to define an action for function type F.
+template <typename F>
+class ActionInterface {
+ public:
+  typedef typename internal::Function<F>::Result Result;
+  typedef typename internal::Function<F>::ArgumentTuple ArgumentTuple;
+
+  ActionInterface() {}
+  virtual ~ActionInterface() {}
+
+  // Performs the action.  This method is not const, as in general an
+  // action can have side effects and be stateful.  For example, a
+  // get-the-next-element-from-the-collection action will need to
+  // remember the current element.
+  virtual Result Perform(const ArgumentTuple& args) = 0;
+
+ private:
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(ActionInterface);
+};
+
+// An Action<F> is a copyable and IMMUTABLE (except by assignment)
+// object that represents an action to be taken when a mock function
+// of type F is called.  The implementation of Action<T> is just a
+// linked_ptr to const ActionInterface<T>, so copying is fairly cheap.
+// Don't inherit from Action!
+//
+// You can view an object implementing ActionInterface<F> as a
+// concrete action (including its current state), and an Action<F>
+// object as a handle to it.
+template <typename F>
+class Action {
+ public:
+  typedef typename internal::Function<F>::Result Result;
+  typedef typename internal::Function<F>::ArgumentTuple ArgumentTuple;
+
+  // Constructs a null Action.  Needed for storing Action objects in
+  // STL containers.
+  Action() : impl_(NULL) {}
+
+  // Constructs an Action from its implementation.  A NULL impl is
+  // used to represent the "do-default" action.
+  explicit Action(ActionInterface<F>* impl) : impl_(impl) {}
+
+  // Copy constructor.
+  Action(const Action& action) : impl_(action.impl_) {}
+
+  // This constructor allows us to turn an Action<Func> object into an
+  // Action<F>, as long as F's arguments can be implicitly converted
+  // to Func's and Func's return type can be implicitly converted to
+  // F's.
+  template <typename Func>
+  explicit Action(const Action<Func>& action);
+
+  // Returns true iff this is the DoDefault() action.
+  bool IsDoDefault() const { return impl_.get() == NULL; }
+
+  // Performs the action.  Note that this method is const even though
+  // the corresponding method in ActionInterface is not.  The reason
+  // is that a const Action<F> means that it cannot be re-bound to
+  // another concrete action, not that the concrete action it binds to
+  // cannot change state.  (Think of the difference between a const
+  // pointer and a pointer to const.)
+  Result Perform(const ArgumentTuple& args) const {
+    internal::Assert(
+        !IsDoDefault(), __FILE__, __LINE__,
+        "You are using DoDefault() inside a composite action like "
+        "DoAll() or WithArgs().  This is not supported for technical "
+        "reasons.  Please instead spell out the default action, or "
+        "assign the default action to an Action variable and use "
+        "the variable in various places.");
+    return impl_->Perform(args);
+  }
+
+ private:
+  template <typename F1, typename F2>
+  friend class internal::ActionAdaptor;
+
+  internal::linked_ptr<ActionInterface<F> > impl_;
+};
+
+// The PolymorphicAction class template makes it easy to implement a
+// polymorphic action (i.e. an action that can be used in mock
+// functions of more than one type, e.g. Return()).
+//
+// To define a polymorphic action, a user first provides a COPYABLE
+// implementation class that has a Perform() method template:
+//
+//   class FooAction {
+//    public:
+//     template <typename Result, typename ArgumentTuple>
+//     Result Perform(const ArgumentTuple& args) const {
+//       // Processes the arguments and returns a result, using
+//       // tr1::get<N>(args) to get the N-th (0-based) argument in the tuple.
+//     }
+//     ...
+//   };
+//
+// Then the user creates the polymorphic action using
+// MakePolymorphicAction(object) where object has type FooAction.  See
+// the definition of Return(void) and SetArgumentPointee<N>(value) for
+// complete examples.
+template <typename Impl>
+class PolymorphicAction {
+ public:
+  explicit PolymorphicAction(const Impl& impl) : impl_(impl) {}
+
+  template <typename F>
+  operator Action<F>() const {
+    return Action<F>(new MonomorphicImpl<F>(impl_));
+  }
+
+ private:
+  template <typename F>
+  class MonomorphicImpl : public ActionInterface<F> {
+   public:
+    typedef typename internal::Function<F>::Result Result;
+    typedef typename internal::Function<F>::ArgumentTuple ArgumentTuple;
+
+    explicit MonomorphicImpl(const Impl& impl) : impl_(impl) {}
+
+    virtual Result Perform(const ArgumentTuple& args) {
+      return impl_.template Perform<Result>(args);
+    }
+
+   private:
+    Impl impl_;
+
+    GTEST_DISALLOW_ASSIGN_(MonomorphicImpl);
+  };
+
+  Impl impl_;
+
+  GTEST_DISALLOW_ASSIGN_(PolymorphicAction);
+};
+
+// Creates an Action from its implementation and returns it.  The
+// created Action object owns the implementation.
+template <typename F>
+Action<F> MakeAction(ActionInterface<F>* impl) {
+  return Action<F>(impl);
+}
+
+// Creates a polymorphic action from its implementation.  This is
+// easier to use than the PolymorphicAction<Impl> constructor as it
+// doesn't require you to explicitly write the template argument, e.g.
+//
+//   MakePolymorphicAction(foo);
+// vs
+//   PolymorphicAction<TypeOfFoo>(foo);
+template <typename Impl>
+inline PolymorphicAction<Impl> MakePolymorphicAction(const Impl& impl) {
+  return PolymorphicAction<Impl>(impl);
+}
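+
+// A usage sketch, continuing the hypothetical FooAction example above:
+//
+//   Action<int(bool)> action = MakePolymorphicAction(FooAction());
+//
+// The same FooAction object could equally be converted to an Action of any
+// other function type whose result FooAction::Perform() can produce.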
+
+namespace internal {
+
+// Allows an Action<F2> object to pose as an Action<F1>, as long as F2
+// and F1 are compatible.
+template <typename F1, typename F2>
+class ActionAdaptor : public ActionInterface<F1> {
+ public:
+  typedef typename internal::Function<F1>::Result Result;
+  typedef typename internal::Function<F1>::ArgumentTuple ArgumentTuple;
+
+  explicit ActionAdaptor(const Action<F2>& from) : impl_(from.impl_) {}
+
+  virtual Result Perform(const ArgumentTuple& args) {
+    return impl_->Perform(args);
+  }
+
+ private:
+  const internal::linked_ptr<ActionInterface<F2> > impl_;
+
+  GTEST_DISALLOW_ASSIGN_(ActionAdaptor);
+};
+
+// Helper struct to specialize ReturnAction to execute a move instead of a copy
+// on return. Useful for move-only types, but could be used on any type.
+template <typename T>
+struct ByMoveWrapper {
+  explicit ByMoveWrapper(T value) : payload(internal::move(value)) {}
+  T payload;
+};
+
+// Implements the polymorphic Return(x) action, which can be used in
+// any function that returns the type of x, regardless of the argument
+// types.
+//
+// Note: The value passed into Return must be converted into
+// Function<F>::Result when this action is cast to Action<F> rather than
+// when that action is performed. This is important in scenarios like
+//
+// MOCK_METHOD1(Method, T(U));
+// ...
+// {
+//   Foo foo;
+//   X x(&foo);
+//   EXPECT_CALL(mock, Method(_)).WillOnce(Return(x));
+// }
+//
+// In the example above the variable x holds a reference to foo, which leaves
+// scope and gets destroyed.  If copying X just copies a reference to foo,
+// that copy will be left with a dangling reference.  If conversion to T
+// makes a copy of foo, the above code is safe. To support that scenario, we
+// need to make sure that the type conversion happens inside the EXPECT_CALL
+// statement, and conversion of the result of Return to Action<T(U)> is a
+// good place for that.
+//
+template <typename R>
+class ReturnAction {
+ public:
+  // Constructs a ReturnAction object from the value to be returned.
+  // 'value' is passed by value instead of by const reference in order
+  // to allow Return("string literal") to compile.
+  explicit ReturnAction(R value) : value_(new R(internal::move(value))) {}
+
+  // This template type conversion operator allows Return(x) to be
+  // used in ANY function that returns x's type.
+  template <typename F>
+  operator Action<F>() const {
+    // Assert statement belongs here because this is the best place to verify
+    // conditions on F. It produces the clearest error messages
+    // in most compilers.
+    // Impl really belongs in this scope as a local class but can't
+    // because MSVC produces duplicate symbols in different translation units
+    // in this case. Until MS fixes that bug we put Impl into the class scope
+    // and put the typedef both here (for use in assert statement) and
+    // in the Impl class. But both definitions must be the same.
+    typedef typename Function<F>::Result Result;
+    GTEST_COMPILE_ASSERT_(
+        !is_reference<Result>::value,
+        use_ReturnRef_instead_of_Return_to_return_a_reference);
+    return Action<F>(new Impl<R, F>(value_));
+  }
+
+ private:
+  // Implements the Return(x) action for a particular function type F.
+  template <typename R_, typename F>
+  class Impl : public ActionInterface<F> {
+   public:
+    typedef typename Function<F>::Result Result;
+    typedef typename Function<F>::ArgumentTuple ArgumentTuple;
+
+    // The implicit cast is necessary when Result has more than one
+    // single-argument constructor (e.g. Result is std::vector<int>) and R
+    // has a type conversion operator template.  In that case, value_(value)
+    // won't compile as the compiler doesn't know which constructor of
+    // Result to call.  ImplicitCast_ forces the compiler to convert R to
+    // Result without considering explicit constructors, thus resolving the
+    // ambiguity. value_ is then initialized using its copy constructor.
+    explicit Impl(const linked_ptr<R>& value)
+        : value_before_cast_(*value),
+          value_(ImplicitCast_<Result>(value_before_cast_)) {}
+
+    virtual Result Perform(const ArgumentTuple&) { return value_; }
+
+   private:
+    GTEST_COMPILE_ASSERT_(!is_reference<Result>::value,
+                          Result_cannot_be_a_reference_type);
+    // We save the value before casting just in case it is being cast to a
+    // wrapper type.
+    R value_before_cast_;
+    Result value_;
+
+    GTEST_DISALLOW_COPY_AND_ASSIGN_(Impl);
+  };
+
+  // Partially specialize for ByMoveWrapper. This version of ReturnAction will
+  // move its contents instead.
+  template <typename R_, typename F>
+  class Impl<ByMoveWrapper<R_>, F> : public ActionInterface<F> {
+   public:
+    typedef typename Function<F>::Result Result;
+    typedef typename Function<F>::ArgumentTuple ArgumentTuple;
+
+    explicit Impl(const linked_ptr<R>& wrapper)
+        : performed_(false), wrapper_(wrapper) {}
+
+    virtual Result Perform(const ArgumentTuple&) {
+      GTEST_CHECK_(!performed_)
+          << "A ByMove() action should only be performed once.";
+      performed_ = true;
+      return internal::move(wrapper_->payload);
+    }
+
+   private:
+    bool performed_;
+    const linked_ptr<R> wrapper_;
+
+    GTEST_DISALLOW_ASSIGN_(Impl);
+  };
+
+  const linked_ptr<R> value_;
+
+  GTEST_DISALLOW_ASSIGN_(ReturnAction);
+};
+
+// Implements the ReturnNull() action.
+class ReturnNullAction {
+ public:
+  // Allows ReturnNull() to be used in any pointer-returning function. In C++11
+  // this is enforced by returning nullptr, and in non-C++11 by asserting a
+  // pointer type at compile time.
+  template <typename Result, typename ArgumentTuple>
+  static Result Perform(const ArgumentTuple&) {
+#if GTEST_LANG_CXX11
+    return nullptr;
+#else
+    GTEST_COMPILE_ASSERT_(internal::is_pointer<Result>::value,
+                          ReturnNull_can_be_used_to_return_a_pointer_only);
+    return NULL;
+#endif  // GTEST_LANG_CXX11
+  }
+};
+
+// Implements the Return() action.
+class ReturnVoidAction {
+ public:
+  // Allows Return() to be used in any void-returning function.
+  template <typename Result, typename ArgumentTuple>
+  static void Perform(const ArgumentTuple&) {
+    CompileAssertTypesEqual<void, Result>();
+  }
+};
+
+// Implements the polymorphic ReturnRef(x) action, which can be used
+// in any function that returns a reference to the type of x,
+// regardless of the argument types.
+template <typename T>
+class ReturnRefAction {
+ public:
+  // Constructs a ReturnRefAction object from the reference to be returned.
+  explicit ReturnRefAction(T& ref) : ref_(ref) {}  // NOLINT
+
+  // This template type conversion operator allows ReturnRef(x) to be
+  // used in ANY function that returns a reference to x's type.
+  template <typename F>
+  operator Action<F>() const {
+    typedef typename Function<F>::Result Result;
+    // Asserts that the function return type is a reference.  This
+    // catches the user error of using ReturnRef(x) when Return(x)
+    // should be used, and generates a helpful error message.
+    GTEST_COMPILE_ASSERT_(internal::is_reference<Result>::value,
+                          use_Return_instead_of_ReturnRef_to_return_a_value);
+    return Action<F>(new Impl<F>(ref_));
+  }
+
+ private:
+  // Implements the ReturnRef(x) action for a particular function type F.
+  template <typename F>
+  class Impl : public ActionInterface<F> {
+   public:
+    typedef typename Function<F>::Result Result;
+    typedef typename Function<F>::ArgumentTuple ArgumentTuple;
+
+    explicit Impl(T& ref) : ref_(ref) {}  // NOLINT
+
+    virtual Result Perform(const ArgumentTuple&) {
+      return ref_;
+    }
+
+   private:
+    T& ref_;
+
+    GTEST_DISALLOW_ASSIGN_(Impl);
+  };
+
+  T& ref_;
+
+  GTEST_DISALLOW_ASSIGN_(ReturnRefAction);
+};
+
+// Implements the polymorphic ReturnRefOfCopy(x) action, which can be
+// used in any function that returns a reference to the type of x,
+// regardless of the argument types.
+template <typename T>
+class ReturnRefOfCopyAction {
+ public:
+  // Constructs a ReturnRefOfCopyAction object from the reference to
+  // be returned.
+  explicit ReturnRefOfCopyAction(const T& value) : value_(value) {}  // NOLINT
+
+  // This template type conversion operator allows ReturnRefOfCopy(x) to be
+  // used in ANY function that returns a reference to x's type.
+  template <typename F>
+  operator Action<F>() const {
+    typedef typename Function<F>::Result Result;
+    // Asserts that the function return type is a reference.  This
+    // catches the user error of using ReturnRefOfCopy(x) when Return(x)
+    // should be used, and generates a helpful error message.
+    GTEST_COMPILE_ASSERT_(
+        internal::is_reference<Result>::value,
+        use_Return_instead_of_ReturnRefOfCopy_to_return_a_value);
+    return Action<F>(new Impl<F>(value_));
+  }
+
+ private:
+  // Implements the ReturnRefOfCopy(x) action for a particular function type F.
+  template <typename F>
+  class Impl : public ActionInterface<F> {
+   public:
+    typedef typename Function<F>::Result Result;
+    typedef typename Function<F>::ArgumentTuple ArgumentTuple;
+
+    explicit Impl(const T& value) : value_(value) {}  // NOLINT
+
+    virtual Result Perform(const ArgumentTuple&) {
+      return value_;
+    }
+
+   private:
+    T value_;
+
+    GTEST_DISALLOW_ASSIGN_(Impl);
+  };
+
+  const T value_;
+
+  GTEST_DISALLOW_ASSIGN_(ReturnRefOfCopyAction);
+};
+
+// Implements the polymorphic DoDefault() action.
+class DoDefaultAction {
+ public:
+  // This template type conversion operator allows DoDefault() to be
+  // used in any function.
+  template <typename F>
+  operator Action<F>() const { return Action<F>(NULL); }
+};
+
+// Implements the Assign action to set a given pointer referent to a
+// particular value.
+template <typename T1, typename T2>
+class AssignAction {
+ public:
+  AssignAction(T1* ptr, T2 value) : ptr_(ptr), value_(value) {}
+
+  template <typename Result, typename ArgumentTuple>
+  void Perform(const ArgumentTuple& /* args */) const {
+    *ptr_ = value_;
+  }
+
+ private:
+  T1* const ptr_;
+  const T2 value_;
+
+  GTEST_DISALLOW_ASSIGN_(AssignAction);
+};
+
+#if !GTEST_OS_WINDOWS_MOBILE
+
+// Implements the SetErrnoAndReturn action to simulate return from
+// various system calls and libc functions.
+template <typename T>
+class SetErrnoAndReturnAction {
+ public:
+  SetErrnoAndReturnAction(int errno_value, T result)
+      : errno_(errno_value),
+        result_(result) {}
+  template <typename Result, typename ArgumentTuple>
+  Result Perform(const ArgumentTuple& /* args */) const {
+    errno = errno_;
+    return result_;
+  }
+
+ private:
+  const int errno_;
+  const T result_;
+
+  GTEST_DISALLOW_ASSIGN_(SetErrnoAndReturnAction);
+};
+
+#endif  // !GTEST_OS_WINDOWS_MOBILE
+
+// Implements the SetArgumentPointee<N>(x) action for any function
+// whose N-th argument (0-based) is a pointer to x's type.  The
+// template parameter kIsProto is true iff type A is ProtocolMessage,
+// proto2::Message, or a sub-class of those.
+template <size_t N, typename A, bool kIsProto>
+class SetArgumentPointeeAction {
+ public:
+  // Constructs an action that sets the variable pointed to by the
+  // N-th function argument to 'value'.
+  explicit SetArgumentPointeeAction(const A& value) : value_(value) {}
+
+  template <typename Result, typename ArgumentTuple>
+  void Perform(const ArgumentTuple& args) const {
+    CompileAssertTypesEqual<void, Result>();
+    *::testing::get<N>(args) = value_;
+  }
+
+ private:
+  const A value_;
+
+  GTEST_DISALLOW_ASSIGN_(SetArgumentPointeeAction);
+};
+
+template <size_t N, typename Proto>
+class SetArgumentPointeeAction<N, Proto, true> {
+ public:
+  // Constructs an action that sets the variable pointed to by the
+  // N-th function argument to 'proto'.  Both ProtocolMessage and
+  // proto2::Message have the CopyFrom() method, so the same
+  // implementation works for both.
+  explicit SetArgumentPointeeAction(const Proto& proto) : proto_(new Proto) {
+    proto_->CopyFrom(proto);
+  }
+
+  template <typename Result, typename ArgumentTuple>
+  void Perform(const ArgumentTuple& args) const {
+    CompileAssertTypesEqual<void, Result>();
+    ::testing::get<N>(args)->CopyFrom(*proto_);
+  }
+
+ private:
+  const internal::linked_ptr<Proto> proto_;
+
+  GTEST_DISALLOW_ASSIGN_(SetArgumentPointeeAction);
+};
+
+// Implements the InvokeWithoutArgs(f) action.  The template argument
+// FunctionImpl is the implementation type of f, which can be either a
+// function pointer or a functor.  InvokeWithoutArgs(f) can be used as an
+// Action<F> as long as f's type is compatible with F (i.e. f can be
+// assigned to a tr1::function<F>).
+template <typename FunctionImpl>
+class InvokeWithoutArgsAction {
+ public:
+  // The c'tor makes a copy of function_impl (either a function
+  // pointer or a functor).
+  explicit InvokeWithoutArgsAction(FunctionImpl function_impl)
+      : function_impl_(function_impl) {}
+
+  // Allows InvokeWithoutArgs(f) to be used as any action whose type is
+  // compatible with f.
+  template <typename Result, typename ArgumentTuple>
+  Result Perform(const ArgumentTuple&) { return function_impl_(); }
+
+ private:
+  FunctionImpl function_impl_;
+
+  GTEST_DISALLOW_ASSIGN_(InvokeWithoutArgsAction);
+};
+
+// Implements the InvokeWithoutArgs(object_ptr, &Class::Method) action.
+template <class Class, typename MethodPtr>
+class InvokeMethodWithoutArgsAction {
+ public:
+  InvokeMethodWithoutArgsAction(Class* obj_ptr, MethodPtr method_ptr)
+      : obj_ptr_(obj_ptr), method_ptr_(method_ptr) {}
+
+  template <typename Result, typename ArgumentTuple>
+  Result Perform(const ArgumentTuple&) const {
+    return (obj_ptr_->*method_ptr_)();
+  }
+
+ private:
+  Class* const obj_ptr_;
+  const MethodPtr method_ptr_;
+
+  GTEST_DISALLOW_ASSIGN_(InvokeMethodWithoutArgsAction);
+};
+
+// Implements the IgnoreResult(action) action.
+template <typename A>
+class IgnoreResultAction {
+ public:
+  explicit IgnoreResultAction(const A& action) : action_(action) {}
+
+  template <typename F>
+  operator Action<F>() const {
+    // Assert statement belongs here because this is the best place to verify
+    // conditions on F. It produces the clearest error messages
+    // in most compilers.
+    // Impl really belongs in this scope as a local class but can't
+    // because MSVC produces duplicate symbols in different translation units
+    // in this case. Until MS fixes that bug we put Impl into the class scope
+    // and put the typedef both here (for use in assert statement) and
+    // in the Impl class. But both definitions must be the same.
+    typedef typename internal::Function<F>::Result Result;
+
+    // Asserts at compile time that F returns void.
+    CompileAssertTypesEqual<void, Result>();
+
+    return Action<F>(new Impl<F>(action_));
+  }
+
+ private:
+  template <typename F>
+  class Impl : public ActionInterface<F> {
+   public:
+    typedef typename internal::Function<F>::Result Result;
+    typedef typename internal::Function<F>::ArgumentTuple ArgumentTuple;
+
+    explicit Impl(const A& action) : action_(action) {}
+
+    virtual void Perform(const ArgumentTuple& args) {
+      // Performs the action and ignores its result.
+      action_.Perform(args);
+    }
+
+   private:
+    // Type OriginalFunction is the same as F except that its return
+    // type is IgnoredValue.
+    typedef typename internal::Function<F>::MakeResultIgnoredValue
+        OriginalFunction;
+
+    const Action<OriginalFunction> action_;
+
+    GTEST_DISALLOW_ASSIGN_(Impl);
+  };
+
+  const A action_;
+
+  GTEST_DISALLOW_ASSIGN_(IgnoreResultAction);
+};
+
+// A ReferenceWrapper<T> object represents a reference to type T,
+// which can be either const or not.  It can be explicitly converted
+// from, and implicitly converted to, a T&.  Unlike a reference,
+// ReferenceWrapper<T> can be copied and can survive template type
+// inference.  This is used to support by-reference arguments in the
+// InvokeArgument<N>(...) action.  The idea was from "reference
+// wrappers" in tr1, which we don't have in our source tree yet.
+template <typename T>
+class ReferenceWrapper {
+ public:
+  // Constructs a ReferenceWrapper<T> object from a T&.
+  explicit ReferenceWrapper(T& l_value) : pointer_(&l_value) {}  // NOLINT
+
+  // Allows a ReferenceWrapper<T> object to be implicitly converted to
+  // a T&.
+  operator T&() const { return *pointer_; }
+ private:
+  T* pointer_;
+};
+
+// Allows the expression ByRef(x) to be printed as a reference to x.
+template <typename T>
+void PrintTo(const ReferenceWrapper<T>& ref, ::std::ostream* os) {
+  T& value = ref;
+  UniversalPrinter<T&>::Print(value, os);
+}
+
+// Does two actions sequentially.  Used for implementing the DoAll(a1,
+// a2, ...) action.
+template <typename Action1, typename Action2>
+class DoBothAction {
+ public:
+  DoBothAction(Action1 action1, Action2 action2)
+      : action1_(action1), action2_(action2) {}
+
+  // This template type conversion operator allows DoAll(a1, ..., a_n)
+  // to be used in ANY function of compatible type.
+  template <typename F>
+  operator Action<F>() const {
+    return Action<F>(new Impl<F>(action1_, action2_));
+  }
+
+ private:
+  // Implements the DoAll(...) action for a particular function type F.
+  template <typename F>
+  class Impl : public ActionInterface<F> {
+   public:
+    typedef typename Function<F>::Result Result;
+    typedef typename Function<F>::ArgumentTuple ArgumentTuple;
+    typedef typename Function<F>::MakeResultVoid VoidResult;
+
+    Impl(const Action<VoidResult>& action1, const Action<F>& action2)
+        : action1_(action1), action2_(action2) {}
+
+    virtual Result Perform(const ArgumentTuple& args) {
+      action1_.Perform(args);
+      return action2_.Perform(args);
+    }
+
+   private:
+    const Action<VoidResult> action1_;
+    const Action<F> action2_;
+
+    GTEST_DISALLOW_ASSIGN_(Impl);
+  };
+
+  Action1 action1_;
+  Action2 action2_;
+
+  GTEST_DISALLOW_ASSIGN_(DoBothAction);
+};
+
+}  // namespace internal
+
+// An Unused object can be implicitly constructed from ANY value.
+// This is handy when defining actions that ignore some or all of the
+// mock function arguments.  For example, given
+//
+//   MOCK_METHOD3(Foo, double(const string& label, double x, double y));
+//   MOCK_METHOD3(Bar, double(int index, double x, double y));
+//
+// instead of
+//
+//   double DistanceToOriginWithLabel(const string& label, double x, double y) {
+//     return sqrt(x*x + y*y);
+//   }
+//   double DistanceToOriginWithIndex(int index, double x, double y) {
+//     return sqrt(x*x + y*y);
+//   }
+//   ...
+//   EXPECT_CALL(mock, Foo("abc", _, _))
+//       .WillOnce(Invoke(DistanceToOriginWithLabel));
+//   EXPECT_CALL(mock, Bar(5, _, _))
+//       .WillOnce(Invoke(DistanceToOriginWithIndex));
+//
+// you could write
+//
+//   // We can declare any uninteresting argument as Unused.
+//   double DistanceToOrigin(Unused, double x, double y) {
+//     return sqrt(x*x + y*y);
+//   }
+//   ...
+//   EXPECT_CALL(mock, Foo("abc", _, _)).WillOnce(Invoke(DistanceToOrigin));
+//   EXPECT_CALL(mock, Bar(5, _, _)).WillOnce(Invoke(DistanceToOrigin));
+typedef internal::IgnoredValue Unused;
+
+// This constructor allows us to turn an Action<From> object into an
+// Action<To>, as long as To's arguments can be implicitly converted
+// to From's and From's return type can be implicitly converted to
+// To's.
+template <typename To>
+template <typename From>
+Action<To>::Action(const Action<From>& from)
+    : impl_(new internal::ActionAdaptor<To, From>(from)) {}
+
+// Creates an action that returns 'value'.  'value' is passed by value
+// instead of by const reference - otherwise Return("string literal")
+// will trigger a compiler error about using an array as an initializer.
+template <typename R>
+internal::ReturnAction<R> Return(R value) {
+  return internal::ReturnAction<R>(internal::move(value));
+}
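+
+// A short usage sketch (MockFoo and its GetName() method are hypothetical):
+//
+//   MockFoo foo;
+//   EXPECT_CALL(foo, GetName()).WillOnce(Return(std::string("alice")));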
+
+// Creates an action that returns NULL.
+inline PolymorphicAction<internal::ReturnNullAction> ReturnNull() {
+  return MakePolymorphicAction(internal::ReturnNullAction());
+}
+
+// Creates an action that returns from a void function.
+inline PolymorphicAction<internal::ReturnVoidAction> Return() {
+  return MakePolymorphicAction(internal::ReturnVoidAction());
+}
+
+// Creates an action that returns the reference to a variable.
+template <typename R>
+inline internal::ReturnRefAction<R> ReturnRef(R& x) {  // NOLINT
+  return internal::ReturnRefAction<R>(x);
+}
+
+// Creates an action that returns the reference to a copy of the
+// argument.  The copy is created when the action is constructed and
+// lives as long as the action.
+template <typename R>
+inline internal::ReturnRefOfCopyAction<R> ReturnRefOfCopy(const R& x) {
+  return internal::ReturnRefOfCopyAction<R>(x);
+}
+
+// Modifies the parent action (a Return() action) to perform a move of the
+// argument instead of a copy.
+// Return(ByMove()) actions can only be executed once and will assert this
+// invariant.
+template <typename R>
+internal::ByMoveWrapper<R> ByMove(R x) {
+  return internal::ByMoveWrapper<R>(internal::move(x));
+}
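+
+// For example (sketch only; MockFoo::MakeBuffer() returning a
+// std::unique_ptr<Buffer> is hypothetical and requires C++11):
+//
+//   EXPECT_CALL(foo, MakeBuffer())
+//       .WillOnce(Return(ByMove(std::unique_ptr<Buffer>(new Buffer))));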
+
+// Creates an action that does the default action for the given mock function.
+inline internal::DoDefaultAction DoDefault() {
+  return internal::DoDefaultAction();
+}
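+
+// For example (sketch; MockFoo::Bar(int) is hypothetical):
+//
+//   ON_CALL(foo, Bar(_)).WillByDefault(Return(1));
+//   EXPECT_CALL(foo, Bar(_))
+//       .WillOnce(Return(2))
+//       .WillRepeatedly(DoDefault());  // Subsequent calls return 1.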
+
+// Creates an action that sets the variable pointed to by the N-th
+// (0-based) function argument to 'value'.
+template <size_t N, typename T>
+PolymorphicAction<
+  internal::SetArgumentPointeeAction<
+    N, T, internal::IsAProtocolMessage<T>::value> >
+SetArgPointee(const T& x) {
+  return MakePolymorphicAction(internal::SetArgumentPointeeAction<
+      N, T, internal::IsAProtocolMessage<T>::value>(x));
+}
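+
+// For example (sketch; MockFoo::GetValue(int* out) returning void is
+// hypothetical):
+//
+//   EXPECT_CALL(foo, GetValue(_)).WillOnce(SetArgPointee<0>(5));
+//
+// After this, GetValue(&n) writes 5 into n.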
+
+#if !((GTEST_GCC_VER_ && GTEST_GCC_VER_ < 40000) || GTEST_OS_SYMBIAN)
+// This overload allows SetArgPointee() to accept a string literal.
+// GCC prior to version 4.0 and the Symbian C++ compiler cannot distinguish
+// this overload from the templated version and emit a compile error.
+template <size_t N>
+PolymorphicAction<
+  internal::SetArgumentPointeeAction<N, const char*, false> >
+SetArgPointee(const char* p) {
+  return MakePolymorphicAction(internal::SetArgumentPointeeAction<
+      N, const char*, false>(p));
+}
+
+template <size_t N>
+PolymorphicAction<
+  internal::SetArgumentPointeeAction<N, const wchar_t*, false> >
+SetArgPointee(const wchar_t* p) {
+  return MakePolymorphicAction(internal::SetArgumentPointeeAction<
+      N, const wchar_t*, false>(p));
+}
+#endif
+
+// The following version is DEPRECATED.
+template <size_t N, typename T>
+PolymorphicAction<
+  internal::SetArgumentPointeeAction<
+    N, T, internal::IsAProtocolMessage<T>::value> >
+SetArgumentPointee(const T& x) {
+  return MakePolymorphicAction(internal::SetArgumentPointeeAction<
+      N, T, internal::IsAProtocolMessage<T>::value>(x));
+}
+
+// Creates an action that sets a pointer referent to a given value.
+template <typename T1, typename T2>
+PolymorphicAction<internal::AssignAction<T1, T2> > Assign(T1* ptr, T2 val) {
+  return MakePolymorphicAction(internal::AssignAction<T1, T2>(ptr, val));
+}
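+
+// For example (sketch; MockFoo::Notify() returning void is hypothetical):
+//
+//   bool done = false;
+//   EXPECT_CALL(foo, Notify()).WillOnce(Assign(&done, true));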
+
+#if !GTEST_OS_WINDOWS_MOBILE
+
+// Creates an action that sets errno and returns the appropriate error.
+template <typename T>
+PolymorphicAction<internal::SetErrnoAndReturnAction<T> >
+SetErrnoAndReturn(int errval, T result) {
+  return MakePolymorphicAction(
+      internal::SetErrnoAndReturnAction<T>(errval, result));
+}
+
+#endif  // !GTEST_OS_WINDOWS_MOBILE
+
+// Various overloads for InvokeWithoutArgs().
+
+// Creates an action that invokes 'function_impl' with no argument.
+template <typename FunctionImpl>
+PolymorphicAction<internal::InvokeWithoutArgsAction<FunctionImpl> >
+InvokeWithoutArgs(FunctionImpl function_impl) {
+  return MakePolymorphicAction(
+      internal::InvokeWithoutArgsAction<FunctionImpl>(function_impl));
+}
+
+// Creates an action that invokes the given method on the given object
+// with no argument.
+template <class Class, typename MethodPtr>
+PolymorphicAction<internal::InvokeMethodWithoutArgsAction<Class, MethodPtr> >
+InvokeWithoutArgs(Class* obj_ptr, MethodPtr method_ptr) {
+  return MakePolymorphicAction(
+      internal::InvokeMethodWithoutArgsAction<Class, MethodPtr>(
+          obj_ptr, method_ptr));
+}
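+
+// For example (sketch; ResetCounters() and MockFoo::Process(int), both
+// returning void, are hypothetical):
+//
+//   void ResetCounters();
+//   ...
+//   EXPECT_CALL(foo, Process(_)).WillOnce(InvokeWithoutArgs(ResetCounters));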
+
+// Creates an action that performs an_action and throws away its
+// result.  In other words, it changes the return type of an_action to
+// void.  an_action MUST NOT return void, or the code won't compile.
+template <typename A>
+inline internal::IgnoreResultAction<A> IgnoreResult(const A& an_action) {
+  return internal::IgnoreResultAction<A>(an_action);
+}
+
+// Creates a reference wrapper for the given L-value.  If necessary,
+// you can explicitly specify the type of the reference.  For example,
+// suppose 'derived' is an object of type Derived; ByRef(derived)
+// would wrap a Derived&.  If you want to wrap a const Base& instead,
+// where Base is a base class of Derived, just write:
+//
+//   ByRef<const Base>(derived)
+template <typename T>
+inline internal::ReferenceWrapper<T> ByRef(T& l_value) {  // NOLINT
+  return internal::ReferenceWrapper<T>(l_value);
+}
+
+}  // namespace testing
+
+#endif  // GMOCK_INCLUDE_GMOCK_GMOCK_ACTIONS_H_
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Mock - a framework for writing C++ mock classes.
+//
+// This file implements some commonly used cardinalities.  More
+// cardinalities can be defined by the user implementing the
+// CardinalityInterface interface if necessary.
+
+#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_CARDINALITIES_H_
+#define GMOCK_INCLUDE_GMOCK_GMOCK_CARDINALITIES_H_
+
+#include <limits.h>
+#include <ostream>  // NOLINT
+
+namespace testing {
+
+// To implement a cardinality Foo, define:
+//   1. a class FooCardinality that implements the
+//      CardinalityInterface interface, and
+//   2. a factory function that creates a Cardinality object from a
+//      const FooCardinality*.
+//
+// The two-level delegation design follows that of Matcher, providing
+// consistency for extension developers.  It also eases ownership
+// management as Cardinality objects can now be copied like plain values.
+
+// The implementation of a cardinality.
+class CardinalityInterface {
+ public:
+  virtual ~CardinalityInterface() {}
+
+  // Conservative estimate on the lower/upper bound of the number of
+  // calls allowed.
+  virtual int ConservativeLowerBound() const { return 0; }
+  virtual int ConservativeUpperBound() const { return INT_MAX; }
+
+  // Returns true iff call_count calls will satisfy this cardinality.
+  virtual bool IsSatisfiedByCallCount(int call_count) const = 0;
+
+  // Returns true iff call_count calls will saturate this cardinality.
+  virtual bool IsSaturatedByCallCount(int call_count) const = 0;
+
+  // Describes self to an ostream.
+  virtual void DescribeTo(::std::ostream* os) const = 0;
+};
+
+// A Cardinality is a copyable and IMMUTABLE (except by assignment)
+// object that specifies how many times a mock function is expected to
+// be called.  The implementation of Cardinality is just a linked_ptr
+// to const CardinalityInterface, so copying is fairly cheap.
+// Don't inherit from Cardinality!
+class GTEST_API_ Cardinality {
+ public:
+  // Constructs a null cardinality.  Needed for storing Cardinality
+  // objects in STL containers.
+  Cardinality() {}
+
+  // Constructs a Cardinality from its implementation.
+  explicit Cardinality(const CardinalityInterface* impl) : impl_(impl) {}
+
+  // Conservative estimate on the lower/upper bound of the number of
+  // calls allowed.
+  int ConservativeLowerBound() const { return impl_->ConservativeLowerBound(); }
+  int ConservativeUpperBound() const { return impl_->ConservativeUpperBound(); }
+
+  // Returns true iff call_count calls will satisfy this cardinality.
+  bool IsSatisfiedByCallCount(int call_count) const {
+    return impl_->IsSatisfiedByCallCount(call_count);
+  }
+
+  // Returns true iff call_count calls will saturate this cardinality.
+  bool IsSaturatedByCallCount(int call_count) const {
+    return impl_->IsSaturatedByCallCount(call_count);
+  }
+
+  // Returns true iff call_count calls will over-saturate this
+  // cardinality, i.e. exceed the maximum number of allowed calls.
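+  // For example (illustrative): with Between(2, 3), a call count of 3
+  // saturates the cardinality but still satisfies it, while a call count
+  // of 4 saturates it without satisfying it, i.e. over-saturates it.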
+  bool IsOverSaturatedByCallCount(int call_count) const {
+    return impl_->IsSaturatedByCallCount(call_count) &&
+        !impl_->IsSatisfiedByCallCount(call_count);
+  }
+
+  // Describes self to an ostream.
+  void DescribeTo(::std::ostream* os) const { impl_->DescribeTo(os); }
+
+  // Describes the given actual call count to an ostream.
+  static void DescribeActualCallCountTo(int actual_call_count,
+                                        ::std::ostream* os);
+
+ private:
+  internal::linked_ptr<const CardinalityInterface> impl_;
+};
+
+// Creates a cardinality that allows at least n calls.
+GTEST_API_ Cardinality AtLeast(int n);
+
+// Creates a cardinality that allows at most n calls.
+GTEST_API_ Cardinality AtMost(int n);
+
+// Creates a cardinality that allows any number of calls.
+GTEST_API_ Cardinality AnyNumber();
+
+// Creates a cardinality that allows between min and max calls.
+GTEST_API_ Cardinality Between(int min, int max);
+
+// Creates a cardinality that allows exactly n calls.
+GTEST_API_ Cardinality Exactly(int n);
+
+// Creates a cardinality from its implementation.
+inline Cardinality MakeCardinality(const CardinalityInterface* c) {
+  return Cardinality(c);
+}
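+
+// A usage sketch (illustrative, not part of the original header), assuming
+// a mock object 'mock' with methods DoThis() and DoThat(); Times() is the
+// expectation clause that consumes a Cardinality:
+//
+//   EXPECT_CALL(mock, DoThis()).Times(AtLeast(2));      // 2 or more calls
+//   EXPECT_CALL(mock, DoThat()).Times(Between(1, 3));   // 1 to 3 calls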
+
+}  // namespace testing
+
+#endif  // GMOCK_INCLUDE_GMOCK_GMOCK_CARDINALITIES_H_
+// This file was GENERATED by a script.  DO NOT EDIT BY HAND!!!
+
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Mock - a framework for writing C++ mock classes.
+//
+// This file implements some commonly used variadic actions.
+
+#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_ACTIONS_H_
+#define GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_ACTIONS_H_
+
+
+namespace testing {
+namespace internal {
+
+// InvokeHelper<F> knows how to unpack an N-tuple and invoke an N-ary
+// function or method with the unpacked values, where F is a function
+// type that takes N arguments.
+template <typename Result, typename ArgumentTuple>
+class InvokeHelper;
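+
+// For illustration only (not part of the original header): given a binary
+// function int Add(int x, int y), the two-argument specialization below
+// unpacks the tuple and forwards its fields, so that
+//
+//   InvokeHelper<int, ::testing::tuple<int, int> >::Invoke(
+//       Add, ::testing::make_tuple(2, 3))
+//
+// evaluates to Add(2, 3), i.e. 5.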
+
+template <typename R>
+class InvokeHelper<R, ::testing::tuple<> > {
+ public:
+  template <typename Function>
+  static R Invoke(Function function, const ::testing::tuple<>&) {
+           return function();
+  }
+
+  template <class Class, typename MethodPtr>
+  static R InvokeMethod(Class* obj_ptr,
+                        MethodPtr method_ptr,
+                        const ::testing::tuple<>&) {
+           return (obj_ptr->*method_ptr)();
+  }
+};
+
+template <typename R, typename A1>
+class InvokeHelper<R, ::testing::tuple<A1> > {
+ public:
+  template <typename Function>
+  static R Invoke(Function function, const ::testing::tuple<A1>& args) {
+           return function(get<0>(args));
+  }
+
+  template <class Class, typename MethodPtr>
+  static R InvokeMethod(Class* obj_ptr,
+                        MethodPtr method_ptr,
+                        const ::testing::tuple<A1>& args) {
+           return (obj_ptr->*method_ptr)(get<0>(args));
+  }
+};
+
+template <typename R, typename A1, typename A2>
+class InvokeHelper<R, ::testing::tuple<A1, A2> > {
+ public:
+  template <typename Function>
+  static R Invoke(Function function, const ::testing::tuple<A1, A2>& args) {
+           return function(get<0>(args), get<1>(args));
+  }
+
+  template <class Class, typename MethodPtr>
+  static R InvokeMethod(Class* obj_ptr,
+                        MethodPtr method_ptr,
+                        const ::testing::tuple<A1, A2>& args) {
+           return (obj_ptr->*method_ptr)(get<0>(args), get<1>(args));
+  }
+};
+
+template <typename R, typename A1, typename A2, typename A3>
+class InvokeHelper<R, ::testing::tuple<A1, A2, A3> > {
+ public:
+  template <typename Function>
+  static R Invoke(Function function, const ::testing::tuple<A1, A2, A3>& args) {
+           return function(get<0>(args), get<1>(args), get<2>(args));
+  }
+
+  template <class Class, typename MethodPtr>
+  static R InvokeMethod(Class* obj_ptr,
+                        MethodPtr method_ptr,
+                        const ::testing::tuple<A1, A2, A3>& args) {
+           return (obj_ptr->*method_ptr)(get<0>(args), get<1>(args),
+               get<2>(args));
+  }
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4>
+class InvokeHelper<R, ::testing::tuple<A1, A2, A3, A4> > {
+ public:
+  template <typename Function>
+  static R Invoke(Function function, const ::testing::tuple<A1, A2, A3,
+      A4>& args) {
+           return function(get<0>(args), get<1>(args), get<2>(args),
+               get<3>(args));
+  }
+
+  template <class Class, typename MethodPtr>
+  static R InvokeMethod(Class* obj_ptr,
+                        MethodPtr method_ptr,
+                        const ::testing::tuple<A1, A2, A3, A4>& args) {
+           return (obj_ptr->*method_ptr)(get<0>(args), get<1>(args),
+               get<2>(args), get<3>(args));
+  }
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5>
+class InvokeHelper<R, ::testing::tuple<A1, A2, A3, A4, A5> > {
+ public:
+  template <typename Function>
+  static R Invoke(Function function, const ::testing::tuple<A1, A2, A3, A4,
+      A5>& args) {
+           return function(get<0>(args), get<1>(args), get<2>(args),
+               get<3>(args), get<4>(args));
+  }
+
+  template <class Class, typename MethodPtr>
+  static R InvokeMethod(Class* obj_ptr,
+                        MethodPtr method_ptr,
+                        const ::testing::tuple<A1, A2, A3, A4, A5>& args) {
+           return (obj_ptr->*method_ptr)(get<0>(args), get<1>(args),
+               get<2>(args), get<3>(args), get<4>(args));
+  }
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5, typename A6>
+class InvokeHelper<R, ::testing::tuple<A1, A2, A3, A4, A5, A6> > {
+ public:
+  template <typename Function>
+  static R Invoke(Function function, const ::testing::tuple<A1, A2, A3, A4, A5,
+      A6>& args) {
+           return function(get<0>(args), get<1>(args), get<2>(args),
+               get<3>(args), get<4>(args), get<5>(args));
+  }
+
+  template <class Class, typename MethodPtr>
+  static R InvokeMethod(Class* obj_ptr,
+                        MethodPtr method_ptr,
+                        const ::testing::tuple<A1, A2, A3, A4, A5, A6>& args) {
+           return (obj_ptr->*method_ptr)(get<0>(args), get<1>(args),
+               get<2>(args), get<3>(args), get<4>(args), get<5>(args));
+  }
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5, typename A6, typename A7>
+class InvokeHelper<R, ::testing::tuple<A1, A2, A3, A4, A5, A6, A7> > {
+ public:
+  template <typename Function>
+  static R Invoke(Function function, const ::testing::tuple<A1, A2, A3, A4, A5,
+      A6, A7>& args) {
+           return function(get<0>(args), get<1>(args), get<2>(args),
+               get<3>(args), get<4>(args), get<5>(args), get<6>(args));
+  }
+
+  template <class Class, typename MethodPtr>
+  static R InvokeMethod(Class* obj_ptr,
+                        MethodPtr method_ptr,
+                        const ::testing::tuple<A1, A2, A3, A4, A5, A6,
+                            A7>& args) {
+           return (obj_ptr->*method_ptr)(get<0>(args), get<1>(args),
+               get<2>(args), get<3>(args), get<4>(args), get<5>(args),
+               get<6>(args));
+  }
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5, typename A6, typename A7, typename A8>
+class InvokeHelper<R, ::testing::tuple<A1, A2, A3, A4, A5, A6, A7, A8> > {
+ public:
+  template <typename Function>
+  static R Invoke(Function function, const ::testing::tuple<A1, A2, A3, A4, A5,
+      A6, A7, A8>& args) {
+           return function(get<0>(args), get<1>(args), get<2>(args),
+               get<3>(args), get<4>(args), get<5>(args), get<6>(args),
+               get<7>(args));
+  }
+
+  template <class Class, typename MethodPtr>
+  static R InvokeMethod(Class* obj_ptr,
+                        MethodPtr method_ptr,
+                        const ::testing::tuple<A1, A2, A3, A4, A5, A6, A7,
+                            A8>& args) {
+           return (obj_ptr->*method_ptr)(get<0>(args), get<1>(args),
+               get<2>(args), get<3>(args), get<4>(args), get<5>(args),
+               get<6>(args), get<7>(args));
+  }
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5, typename A6, typename A7, typename A8, typename A9>
+class InvokeHelper<R, ::testing::tuple<A1, A2, A3, A4, A5, A6, A7, A8, A9> > {
+ public:
+  template <typename Function>
+  static R Invoke(Function function, const ::testing::tuple<A1, A2, A3, A4, A5,
+      A6, A7, A8, A9>& args) {
+           return function(get<0>(args), get<1>(args), get<2>(args),
+               get<3>(args), get<4>(args), get<5>(args), get<6>(args),
+               get<7>(args), get<8>(args));
+  }
+
+  template <class Class, typename MethodPtr>
+  static R InvokeMethod(Class* obj_ptr,
+                        MethodPtr method_ptr,
+                        const ::testing::tuple<A1, A2, A3, A4, A5, A6, A7, A8,
+                            A9>& args) {
+           return (obj_ptr->*method_ptr)(get<0>(args), get<1>(args),
+               get<2>(args), get<3>(args), get<4>(args), get<5>(args),
+               get<6>(args), get<7>(args), get<8>(args));
+  }
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5, typename A6, typename A7, typename A8, typename A9,
+    typename A10>
+class InvokeHelper<R, ::testing::tuple<A1, A2, A3, A4, A5, A6, A7, A8, A9,
+    A10> > {
+ public:
+  template <typename Function>
+  static R Invoke(Function function, const ::testing::tuple<A1, A2, A3, A4, A5,
+      A6, A7, A8, A9, A10>& args) {
+           return function(get<0>(args), get<1>(args), get<2>(args),
+               get<3>(args), get<4>(args), get<5>(args), get<6>(args),
+               get<7>(args), get<8>(args), get<9>(args));
+  }
+
+  template <class Class, typename MethodPtr>
+  static R InvokeMethod(Class* obj_ptr,
+                        MethodPtr method_ptr,
+                        const ::testing::tuple<A1, A2, A3, A4, A5, A6, A7, A8,
+                            A9, A10>& args) {
+           return (obj_ptr->*method_ptr)(get<0>(args), get<1>(args),
+               get<2>(args), get<3>(args), get<4>(args), get<5>(args),
+               get<6>(args), get<7>(args), get<8>(args), get<9>(args));
+  }
+};
+
+// An INTERNAL macro for extracting the type of a tuple field.  It's
+// subject to change without notice - DO NOT USE IN USER CODE!
+#define GMOCK_FIELD_(Tuple, N) \
+    typename ::testing::tuple_element<N, Tuple>::type
+
+// SelectArgs<Result, ArgumentTuple, k1, k2, ..., k_n>::type is the
+// type of an n-ary function whose i-th (1-based) argument type is the
+// k{i}-th (0-based) field of ArgumentTuple, which must be a tuple
+// type, and whose return type is Result.  For example,
+//   SelectArgs<int, ::testing::tuple<bool, char, double, long>, 0, 3>::type
+// is int(bool, long).
+//
+// SelectArgs<Result, ArgumentTuple, k1, k2, ..., k_n>::Select(args)
+// returns the selected fields (k1, k2, ..., k_n) of args as a tuple.
+// For example,
+//   SelectArgs<int, tuple<bool, char, double>, 2, 0>::Select(
+//       ::testing::make_tuple(true, 'a', 2.5))
+// returns tuple (2.5, true).
+//
+// The numbers in list k1, k2, ..., k_n must be >= 0, where n can be
+// in the range [0, 10].  Duplicates are allowed and they don't have
+// to be in an ascending or descending order.
+
+template <typename Result, typename ArgumentTuple, int k1, int k2, int k3,
+    int k4, int k5, int k6, int k7, int k8, int k9, int k10>
+class SelectArgs {
+ public:
+  typedef Result type(GMOCK_FIELD_(ArgumentTuple, k1),
+      GMOCK_FIELD_(ArgumentTuple, k2), GMOCK_FIELD_(ArgumentTuple, k3),
+      GMOCK_FIELD_(ArgumentTuple, k4), GMOCK_FIELD_(ArgumentTuple, k5),
+      GMOCK_FIELD_(ArgumentTuple, k6), GMOCK_FIELD_(ArgumentTuple, k7),
+      GMOCK_FIELD_(ArgumentTuple, k8), GMOCK_FIELD_(ArgumentTuple, k9),
+      GMOCK_FIELD_(ArgumentTuple, k10));
+  typedef typename Function<type>::ArgumentTuple SelectedArgs;
+  static SelectedArgs Select(const ArgumentTuple& args) {
+    return SelectedArgs(get<k1>(args), get<k2>(args), get<k3>(args),
+        get<k4>(args), get<k5>(args), get<k6>(args), get<k7>(args),
+        get<k8>(args), get<k9>(args), get<k10>(args));
+  }
+};
+
+template <typename Result, typename ArgumentTuple>
+class SelectArgs<Result, ArgumentTuple,
+                 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1> {
+ public:
+  typedef Result type();
+  typedef typename Function<type>::ArgumentTuple SelectedArgs;
+  static SelectedArgs Select(const ArgumentTuple& /* args */) {
+    return SelectedArgs();
+  }
+};
+
+template <typename Result, typename ArgumentTuple, int k1>
+class SelectArgs<Result, ArgumentTuple,
+                 k1, -1, -1, -1, -1, -1, -1, -1, -1, -1> {
+ public:
+  typedef Result type(GMOCK_FIELD_(ArgumentTuple, k1));
+  typedef typename Function<type>::ArgumentTuple SelectedArgs;
+  static SelectedArgs Select(const ArgumentTuple& args) {
+    return SelectedArgs(get<k1>(args));
+  }
+};
+
+template <typename Result, typename ArgumentTuple, int k1, int k2>
+class SelectArgs<Result, ArgumentTuple,
+                 k1, k2, -1, -1, -1, -1, -1, -1, -1, -1> {
+ public:
+  typedef Result type(GMOCK_FIELD_(ArgumentTuple, k1),
+      GMOCK_FIELD_(ArgumentTuple, k2));
+  typedef typename Function<type>::ArgumentTuple SelectedArgs;
+  static SelectedArgs Select(const ArgumentTuple& args) {
+    return SelectedArgs(get<k1>(args), get<k2>(args));
+  }
+};
+
+template <typename Result, typename ArgumentTuple, int k1, int k2, int k3>
+class SelectArgs<Result, ArgumentTuple,
+                 k1, k2, k3, -1, -1, -1, -1, -1, -1, -1> {
+ public:
+  typedef Result type(GMOCK_FIELD_(ArgumentTuple, k1),
+      GMOCK_FIELD_(ArgumentTuple, k2), GMOCK_FIELD_(ArgumentTuple, k3));
+  typedef typename Function<type>::ArgumentTuple SelectedArgs;
+  static SelectedArgs Select(const ArgumentTuple& args) {
+    return SelectedArgs(get<k1>(args), get<k2>(args), get<k3>(args));
+  }
+};
+
+template <typename Result, typename ArgumentTuple, int k1, int k2, int k3,
+    int k4>
+class SelectArgs<Result, ArgumentTuple,
+                 k1, k2, k3, k4, -1, -1, -1, -1, -1, -1> {
+ public:
+  typedef Result type(GMOCK_FIELD_(ArgumentTuple, k1),
+      GMOCK_FIELD_(ArgumentTuple, k2), GMOCK_FIELD_(ArgumentTuple, k3),
+      GMOCK_FIELD_(ArgumentTuple, k4));
+  typedef typename Function<type>::ArgumentTuple SelectedArgs;
+  static SelectedArgs Select(const ArgumentTuple& args) {
+    return SelectedArgs(get<k1>(args), get<k2>(args), get<k3>(args),
+        get<k4>(args));
+  }
+};
+
+template <typename Result, typename ArgumentTuple, int k1, int k2, int k3,
+    int k4, int k5>
+class SelectArgs<Result, ArgumentTuple,
+                 k1, k2, k3, k4, k5, -1, -1, -1, -1, -1> {
+ public:
+  typedef Result type(GMOCK_FIELD_(ArgumentTuple, k1),
+      GMOCK_FIELD_(ArgumentTuple, k2), GMOCK_FIELD_(ArgumentTuple, k3),
+      GMOCK_FIELD_(ArgumentTuple, k4), GMOCK_FIELD_(ArgumentTuple, k5));
+  typedef typename Function<type>::ArgumentTuple SelectedArgs;
+  static SelectedArgs Select(const ArgumentTuple& args) {
+    return SelectedArgs(get<k1>(args), get<k2>(args), get<k3>(args),
+        get<k4>(args), get<k5>(args));
+  }
+};
+
+template <typename Result, typename ArgumentTuple, int k1, int k2, int k3,
+    int k4, int k5, int k6>
+class SelectArgs<Result, ArgumentTuple,
+                 k1, k2, k3, k4, k5, k6, -1, -1, -1, -1> {
+ public:
+  typedef Result type(GMOCK_FIELD_(ArgumentTuple, k1),
+      GMOCK_FIELD_(ArgumentTuple, k2), GMOCK_FIELD_(ArgumentTuple, k3),
+      GMOCK_FIELD_(ArgumentTuple, k4), GMOCK_FIELD_(ArgumentTuple, k5),
+      GMOCK_FIELD_(ArgumentTuple, k6));
+  typedef typename Function<type>::ArgumentTuple SelectedArgs;
+  static SelectedArgs Select(const ArgumentTuple& args) {
+    return SelectedArgs(get<k1>(args), get<k2>(args), get<k3>(args),
+        get<k4>(args), get<k5>(args), get<k6>(args));
+  }
+};
+
+template <typename Result, typename ArgumentTuple, int k1, int k2, int k3,
+    int k4, int k5, int k6, int k7>
+class SelectArgs<Result, ArgumentTuple,
+                 k1, k2, k3, k4, k5, k6, k7, -1, -1, -1> {
+ public:
+  typedef Result type(GMOCK_FIELD_(ArgumentTuple, k1),
+      GMOCK_FIELD_(ArgumentTuple, k2), GMOCK_FIELD_(ArgumentTuple, k3),
+      GMOCK_FIELD_(ArgumentTuple, k4), GMOCK_FIELD_(ArgumentTuple, k5),
+      GMOCK_FIELD_(ArgumentTuple, k6), GMOCK_FIELD_(ArgumentTuple, k7));
+  typedef typename Function<type>::ArgumentTuple SelectedArgs;
+  static SelectedArgs Select(const ArgumentTuple& args) {
+    return SelectedArgs(get<k1>(args), get<k2>(args), get<k3>(args),
+        get<k4>(args), get<k5>(args), get<k6>(args), get<k7>(args));
+  }
+};
+
+template <typename Result, typename ArgumentTuple, int k1, int k2, int k3,
+    int k4, int k5, int k6, int k7, int k8>
+class SelectArgs<Result, ArgumentTuple,
+                 k1, k2, k3, k4, k5, k6, k7, k8, -1, -1> {
+ public:
+  typedef Result type(GMOCK_FIELD_(ArgumentTuple, k1),
+      GMOCK_FIELD_(ArgumentTuple, k2), GMOCK_FIELD_(ArgumentTuple, k3),
+      GMOCK_FIELD_(ArgumentTuple, k4), GMOCK_FIELD_(ArgumentTuple, k5),
+      GMOCK_FIELD_(ArgumentTuple, k6), GMOCK_FIELD_(ArgumentTuple, k7),
+      GMOCK_FIELD_(ArgumentTuple, k8));
+  typedef typename Function<type>::ArgumentTuple SelectedArgs;
+  static SelectedArgs Select(const ArgumentTuple& args) {
+    return SelectedArgs(get<k1>(args), get<k2>(args), get<k3>(args),
+        get<k4>(args), get<k5>(args), get<k6>(args), get<k7>(args),
+        get<k8>(args));
+  }
+};
+
+template <typename Result, typename ArgumentTuple, int k1, int k2, int k3,
+    int k4, int k5, int k6, int k7, int k8, int k9>
+class SelectArgs<Result, ArgumentTuple,
+                 k1, k2, k3, k4, k5, k6, k7, k8, k9, -1> {
+ public:
+  typedef Result type(GMOCK_FIELD_(ArgumentTuple, k1),
+      GMOCK_FIELD_(ArgumentTuple, k2), GMOCK_FIELD_(ArgumentTuple, k3),
+      GMOCK_FIELD_(ArgumentTuple, k4), GMOCK_FIELD_(ArgumentTuple, k5),
+      GMOCK_FIELD_(ArgumentTuple, k6), GMOCK_FIELD_(ArgumentTuple, k7),
+      GMOCK_FIELD_(ArgumentTuple, k8), GMOCK_FIELD_(ArgumentTuple, k9));
+  typedef typename Function<type>::ArgumentTuple SelectedArgs;
+  static SelectedArgs Select(const ArgumentTuple& args) {
+    return SelectedArgs(get<k1>(args), get<k2>(args), get<k3>(args),
+        get<k4>(args), get<k5>(args), get<k6>(args), get<k7>(args),
+        get<k8>(args), get<k9>(args));
+  }
+};
+
+#undef GMOCK_FIELD_
+
+// Implements the WithArgs action.
+template <typename InnerAction, int k1 = -1, int k2 = -1, int k3 = -1,
+    int k4 = -1, int k5 = -1, int k6 = -1, int k7 = -1, int k8 = -1,
+    int k9 = -1, int k10 = -1>
+class WithArgsAction {
+ public:
+  explicit WithArgsAction(const InnerAction& action) : action_(action) {}
+
+  template <typename F>
+  operator Action<F>() const { return MakeAction(new Impl<F>(action_)); }
+
+ private:
+  template <typename F>
+  class Impl : public ActionInterface<F> {
+   public:
+    typedef typename Function<F>::Result Result;
+    typedef typename Function<F>::ArgumentTuple ArgumentTuple;
+
+    explicit Impl(const InnerAction& action) : action_(action) {}
+
+    virtual Result Perform(const ArgumentTuple& args) {
+      return action_.Perform(SelectArgs<Result, ArgumentTuple, k1, k2, k3, k4,
+          k5, k6, k7, k8, k9, k10>::Select(args));
+    }
+
+   private:
+    typedef typename SelectArgs<Result, ArgumentTuple,
+        k1, k2, k3, k4, k5, k6, k7, k8, k9, k10>::type InnerFunctionType;
+
+    Action<InnerFunctionType> action_;
+  };
+
+  const InnerAction action_;
+
+  GTEST_DISALLOW_ASSIGN_(WithArgsAction);
+};
+
+// A macro from the ACTION* family (defined later in this file)
+// defines an action that can be used in a mock function.  Typically,
+// these actions only care about a subset of the arguments of the mock
+// function.  For example, if such an action only uses the second
+// argument, it can be used in any mock function that takes >= 2
+// arguments where the type of the second argument is compatible.
+//
+// Therefore, the action implementation must be prepared to take more
+// arguments than it needs.  The ExcessiveArg type is used to
+// represent those excessive arguments.  In order to keep the compiler
+// error messages tractable, we define it in the testing namespace
+// instead of testing::internal.  However, this is an INTERNAL TYPE
+// and subject to change without notice, so a user MUST NOT USE THIS
+// TYPE DIRECTLY.
+struct ExcessiveArg {};
+
+// A helper class needed for implementing the ACTION* macros.
+template <typename Result, class Impl>
+class ActionHelper {
+ public:
+  static Result Perform(Impl* impl, const ::testing::tuple<>& args) {
+    return impl->template gmock_PerformImpl<>(args, ExcessiveArg(),
+        ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), ExcessiveArg(),
+        ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), ExcessiveArg(),
+        ExcessiveArg());
+  }
+
+  template <typename A0>
+  static Result Perform(Impl* impl, const ::testing::tuple<A0>& args) {
+    return impl->template gmock_PerformImpl<A0>(args, get<0>(args),
+        ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), ExcessiveArg(),
+        ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), ExcessiveArg(),
+        ExcessiveArg());
+  }
+
+  template <typename A0, typename A1>
+  static Result Perform(Impl* impl, const ::testing::tuple<A0, A1>& args) {
+    return impl->template gmock_PerformImpl<A0, A1>(args, get<0>(args),
+        get<1>(args), ExcessiveArg(), ExcessiveArg(), ExcessiveArg(),
+        ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), ExcessiveArg(),
+        ExcessiveArg());
+  }
+
+  template <typename A0, typename A1, typename A2>
+  static Result Perform(Impl* impl, const ::testing::tuple<A0, A1, A2>& args) {
+    return impl->template gmock_PerformImpl<A0, A1, A2>(args, get<0>(args),
+        get<1>(args), get<2>(args), ExcessiveArg(), ExcessiveArg(),
+        ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), ExcessiveArg(),
+        ExcessiveArg());
+  }
+
+  template <typename A0, typename A1, typename A2, typename A3>
+  static Result Perform(Impl* impl, const ::testing::tuple<A0, A1, A2,
+      A3>& args) {
+    return impl->template gmock_PerformImpl<A0, A1, A2, A3>(args, get<0>(args),
+        get<1>(args), get<2>(args), get<3>(args), ExcessiveArg(),
+        ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), ExcessiveArg(),
+        ExcessiveArg());
+  }
+
+  template <typename A0, typename A1, typename A2, typename A3, typename A4>
+  static Result Perform(Impl* impl, const ::testing::tuple<A0, A1, A2, A3,
+      A4>& args) {
+    return impl->template gmock_PerformImpl<A0, A1, A2, A3, A4>(args,
+        get<0>(args), get<1>(args), get<2>(args), get<3>(args), get<4>(args),
+        ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), ExcessiveArg(),
+        ExcessiveArg());
+  }
+
+  template <typename A0, typename A1, typename A2, typename A3, typename A4,
+      typename A5>
+  static Result Perform(Impl* impl, const ::testing::tuple<A0, A1, A2, A3, A4,
+      A5>& args) {
+    return impl->template gmock_PerformImpl<A0, A1, A2, A3, A4, A5>(args,
+        get<0>(args), get<1>(args), get<2>(args), get<3>(args), get<4>(args),
+        get<5>(args), ExcessiveArg(), ExcessiveArg(), ExcessiveArg(),
+        ExcessiveArg());
+  }
+
+  template <typename A0, typename A1, typename A2, typename A3, typename A4,
+      typename A5, typename A6>
+  static Result Perform(Impl* impl, const ::testing::tuple<A0, A1, A2, A3, A4,
+      A5, A6>& args) {
+    return impl->template gmock_PerformImpl<A0, A1, A2, A3, A4, A5, A6>(args,
+        get<0>(args), get<1>(args), get<2>(args), get<3>(args), get<4>(args),
+        get<5>(args), get<6>(args), ExcessiveArg(), ExcessiveArg(),
+        ExcessiveArg());
+  }
+
+  template <typename A0, typename A1, typename A2, typename A3, typename A4,
+      typename A5, typename A6, typename A7>
+  static Result Perform(Impl* impl, const ::testing::tuple<A0, A1, A2, A3, A4,
+      A5, A6, A7>& args) {
+    return impl->template gmock_PerformImpl<A0, A1, A2, A3, A4, A5, A6,
+        A7>(args, get<0>(args), get<1>(args), get<2>(args), get<3>(args),
+        get<4>(args), get<5>(args), get<6>(args), get<7>(args), ExcessiveArg(),
+        ExcessiveArg());
+  }
+
+  template <typename A0, typename A1, typename A2, typename A3, typename A4,
+      typename A5, typename A6, typename A7, typename A8>
+  static Result Perform(Impl* impl, const ::testing::tuple<A0, A1, A2, A3, A4,
+      A5, A6, A7, A8>& args) {
+    return impl->template gmock_PerformImpl<A0, A1, A2, A3, A4, A5, A6, A7,
+        A8>(args, get<0>(args), get<1>(args), get<2>(args), get<3>(args),
+        get<4>(args), get<5>(args), get<6>(args), get<7>(args), get<8>(args),
+        ExcessiveArg());
+  }
+
+  template <typename A0, typename A1, typename A2, typename A3, typename A4,
+      typename A5, typename A6, typename A7, typename A8, typename A9>
+  static Result Perform(Impl* impl, const ::testing::tuple<A0, A1, A2, A3, A4,
+      A5, A6, A7, A8, A9>& args) {
+    return impl->template gmock_PerformImpl<A0, A1, A2, A3, A4, A5, A6, A7, A8,
+        A9>(args, get<0>(args), get<1>(args), get<2>(args), get<3>(args),
+        get<4>(args), get<5>(args), get<6>(args), get<7>(args), get<8>(args),
+        get<9>(args));
+  }
+};
+
+}  // namespace internal
+
+// Various overloads for Invoke().
+
+// WithArgs<N1, N2, ..., Nk>(an_action) creates an action that passes
+// the selected arguments of the mock function to an_action and
+// performs it.  It serves as an adaptor between actions with
+// different argument lists.  C++ doesn't support default arguments for
+// function templates, so we have to overload it.
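+//
+// For example (illustrative, not part of the original header), if the mock
+// method Foo takes (bool, int, int) and returns bool, and the helper
+// bool IsPositiveSum(int, int) only needs the last two arguments:
+//
+//   EXPECT_CALL(mock, Foo(_, _, _))
+//       .WillOnce(WithArgs<1, 2>(Invoke(IsPositiveSum)));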
+template <int k1, typename InnerAction>
+inline internal::WithArgsAction<InnerAction, k1>
+WithArgs(const InnerAction& action) {
+  return internal::WithArgsAction<InnerAction, k1>(action);
+}
+
+template <int k1, int k2, typename InnerAction>
+inline internal::WithArgsAction<InnerAction, k1, k2>
+WithArgs(const InnerAction& action) {
+  return internal::WithArgsAction<InnerAction, k1, k2>(action);
+}
+
+template <int k1, int k2, int k3, typename InnerAction>
+inline internal::WithArgsAction<InnerAction, k1, k2, k3>
+WithArgs(const InnerAction& action) {
+  return internal::WithArgsAction<InnerAction, k1, k2, k3>(action);
+}
+
+template <int k1, int k2, int k3, int k4, typename InnerAction>
+inline internal::WithArgsAction<InnerAction, k1, k2, k3, k4>
+WithArgs(const InnerAction& action) {
+  return internal::WithArgsAction<InnerAction, k1, k2, k3, k4>(action);
+}
+
+template <int k1, int k2, int k3, int k4, int k5, typename InnerAction>
+inline internal::WithArgsAction<InnerAction, k1, k2, k3, k4, k5>
+WithArgs(const InnerAction& action) {
+  return internal::WithArgsAction<InnerAction, k1, k2, k3, k4, k5>(action);
+}
+
+template <int k1, int k2, int k3, int k4, int k5, int k6, typename InnerAction>
+inline internal::WithArgsAction<InnerAction, k1, k2, k3, k4, k5, k6>
+WithArgs(const InnerAction& action) {
+  return internal::WithArgsAction<InnerAction, k1, k2, k3, k4, k5, k6>(action);
+}
+
+template <int k1, int k2, int k3, int k4, int k5, int k6, int k7,
+    typename InnerAction>
+inline internal::WithArgsAction<InnerAction, k1, k2, k3, k4, k5, k6, k7>
+WithArgs(const InnerAction& action) {
+  return internal::WithArgsAction<InnerAction, k1, k2, k3, k4, k5, k6,
+      k7>(action);
+}
+
+template <int k1, int k2, int k3, int k4, int k5, int k6, int k7, int k8,
+    typename InnerAction>
+inline internal::WithArgsAction<InnerAction, k1, k2, k3, k4, k5, k6, k7, k8>
+WithArgs(const InnerAction& action) {
+  return internal::WithArgsAction<InnerAction, k1, k2, k3, k4, k5, k6, k7,
+      k8>(action);
+}
+
+template <int k1, int k2, int k3, int k4, int k5, int k6, int k7, int k8,
+    int k9, typename InnerAction>
+inline internal::WithArgsAction<InnerAction, k1, k2, k3, k4, k5, k6, k7, k8, k9>
+WithArgs(const InnerAction& action) {
+  return internal::WithArgsAction<InnerAction, k1, k2, k3, k4, k5, k6, k7, k8,
+      k9>(action);
+}
+
+template <int k1, int k2, int k3, int k4, int k5, int k6, int k7, int k8,
+    int k9, int k10, typename InnerAction>
+inline internal::WithArgsAction<InnerAction, k1, k2, k3, k4, k5, k6, k7, k8,
+    k9, k10>
+WithArgs(const InnerAction& action) {
+  return internal::WithArgsAction<InnerAction, k1, k2, k3, k4, k5, k6, k7, k8,
+      k9, k10>(action);
+}
+
+// Creates an action that does actions a1, a2, ..., sequentially in
+// each invocation.
+template <typename Action1, typename Action2>
+inline internal::DoBothAction<Action1, Action2>
+DoAll(Action1 a1, Action2 a2) {
+  return internal::DoBothAction<Action1, Action2>(a1, a2);
+}
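+
+// A usage sketch (illustrative, not part of the original header), assuming
+// a mock method bool Mutate(int* p); SetArgPointee and Return are the
+// standard gmock actions defined elsewhere:
+//
+//   EXPECT_CALL(mock, Mutate(_))
+//       .WillOnce(DoAll(SetArgPointee<0>(5),  // first write 5 through arg 0
+//                       Return(true)));       // then return true to the caller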
+
+template <typename Action1, typename Action2, typename Action3>
+inline internal::DoBothAction<Action1, internal::DoBothAction<Action2,
+    Action3> >
+DoAll(Action1 a1, Action2 a2, Action3 a3) {
+  return DoAll(a1, DoAll(a2, a3));
+}
+
+template <typename Action1, typename Action2, typename Action3,
+    typename Action4>
+inline internal::DoBothAction<Action1, internal::DoBothAction<Action2,
+    internal::DoBothAction<Action3, Action4> > >
+DoAll(Action1 a1, Action2 a2, Action3 a3, Action4 a4) {
+  return DoAll(a1, DoAll(a2, a3, a4));
+}
+
+template <typename Action1, typename Action2, typename Action3,
+    typename Action4, typename Action5>
+inline internal::DoBothAction<Action1, internal::DoBothAction<Action2,
+    internal::DoBothAction<Action3, internal::DoBothAction<Action4,
+    Action5> > > >
+DoAll(Action1 a1, Action2 a2, Action3 a3, Action4 a4, Action5 a5) {
+  return DoAll(a1, DoAll(a2, a3, a4, a5));
+}
+
+template <typename Action1, typename Action2, typename Action3,
+    typename Action4, typename Action5, typename Action6>
+inline internal::DoBothAction<Action1, internal::DoBothAction<Action2,
+    internal::DoBothAction<Action3, internal::DoBothAction<Action4,
+    internal::DoBothAction<Action5, Action6> > > > >
+DoAll(Action1 a1, Action2 a2, Action3 a3, Action4 a4, Action5 a5, Action6 a6) {
+  return DoAll(a1, DoAll(a2, a3, a4, a5, a6));
+}
+
+template <typename Action1, typename Action2, typename Action3,
+    typename Action4, typename Action5, typename Action6, typename Action7>
+inline internal::DoBothAction<Action1, internal::DoBothAction<Action2,
+    internal::DoBothAction<Action3, internal::DoBothAction<Action4,
+    internal::DoBothAction<Action5, internal::DoBothAction<Action6,
+    Action7> > > > > >
+DoAll(Action1 a1, Action2 a2, Action3 a3, Action4 a4, Action5 a5, Action6 a6,
+    Action7 a7) {
+  return DoAll(a1, DoAll(a2, a3, a4, a5, a6, a7));
+}
+
+template <typename Action1, typename Action2, typename Action3,
+    typename Action4, typename Action5, typename Action6, typename Action7,
+    typename Action8>
+inline internal::DoBothAction<Action1, internal::DoBothAction<Action2,
+    internal::DoBothAction<Action3, internal::DoBothAction<Action4,
+    internal::DoBothAction<Action5, internal::DoBothAction<Action6,
+    internal::DoBothAction<Action7, Action8> > > > > > >
+DoAll(Action1 a1, Action2 a2, Action3 a3, Action4 a4, Action5 a5, Action6 a6,
+    Action7 a7, Action8 a8) {
+  return DoAll(a1, DoAll(a2, a3, a4, a5, a6, a7, a8));
+}
+
+template <typename Action1, typename Action2, typename Action3,
+    typename Action4, typename Action5, typename Action6, typename Action7,
+    typename Action8, typename Action9>
+inline internal::DoBothAction<Action1, internal::DoBothAction<Action2,
+    internal::DoBothAction<Action3, internal::DoBothAction<Action4,
+    internal::DoBothAction<Action5, internal::DoBothAction<Action6,
+    internal::DoBothAction<Action7, internal::DoBothAction<Action8,
+    Action9> > > > > > > >
+DoAll(Action1 a1, Action2 a2, Action3 a3, Action4 a4, Action5 a5, Action6 a6,
+    Action7 a7, Action8 a8, Action9 a9) {
+  return DoAll(a1, DoAll(a2, a3, a4, a5, a6, a7, a8, a9));
+}
+
+template <typename Action1, typename Action2, typename Action3,
+    typename Action4, typename Action5, typename Action6, typename Action7,
+    typename Action8, typename Action9, typename Action10>
+inline internal::DoBothAction<Action1, internal::DoBothAction<Action2,
+    internal::DoBothAction<Action3, internal::DoBothAction<Action4,
+    internal::DoBothAction<Action5, internal::DoBothAction<Action6,
+    internal::DoBothAction<Action7, internal::DoBothAction<Action8,
+    internal::DoBothAction<Action9, Action10> > > > > > > > >
+DoAll(Action1 a1, Action2 a2, Action3 a3, Action4 a4, Action5 a5, Action6 a6,
+    Action7 a7, Action8 a8, Action9 a9, Action10 a10) {
+  return DoAll(a1, DoAll(a2, a3, a4, a5, a6, a7, a8, a9, a10));
+}
+
+}  // namespace testing
+
+// The ACTION* family of macros can be used in a namespace scope to
+// define custom actions easily.  The syntax:
+//
+//   ACTION(name) { statements; }
+//
+// will define an action with the given name that executes the
+// statements.  The value returned by the statements will be used as
+// the return value of the action.  Inside the statements, you can
+// refer to the K-th (0-based) argument of the mock function by
+// 'argK', and refer to its type by 'argK_type'.  For example:
+//
+//   ACTION(IncrementArg1) {
+//     arg1_type temp = arg1;
+//     return ++(*temp);
+//   }
+//
+// allows you to write
+//
+//   ...WillOnce(IncrementArg1());
+//
+// You can also refer to the entire argument tuple and its type by
+// 'args' and 'args_type', and refer to the mock function type and its
+// return type by 'function_type' and 'return_type'.
+//
+// Note that you don't need to specify the types of the mock function
+// arguments.  However, rest assured that your code is still type-safe:
+// you'll get a compiler error if *arg1 doesn't support the ++
+// operator, or if the type of ++(*arg1) isn't compatible with the
+// mock function's return type, for example.
+//
+// Sometimes you'll want to parameterize the action.  For that, you can use
+// another macro:
+//
+//   ACTION_P(name, param_name) { statements; }
+//
+// For example:
+//
+//   ACTION_P(Add, n) { return arg0 + n; }
+//
+// will allow you to write:
+//
+//   ...WillOnce(Add(5));
+//
+// Note that you don't need to provide the type of the parameter
+// either.  If you need to reference the type of a parameter named
+// 'foo', you can write 'foo_type'.  For example, in the body of
+// ACTION_P(Add, n) above, you can write 'n_type' to refer to the type
+// of 'n'.
+//
+// We also provide ACTION_P2, ACTION_P3, ..., up to ACTION_P10 to support
+// multi-parameter actions.
+//
+// For the purpose of typing, you can view
+//
+//   ACTION_Pk(Foo, p1, ..., pk) { ... }
+//
+// as shorthand for
+//
+//   template <typename p1_type, ..., typename pk_type>
+//   FooActionPk<p1_type, ..., pk_type> Foo(p1_type p1, ..., pk_type pk) { ... }
+//
+// In particular, you can provide the template type arguments
+// explicitly when invoking Foo(), as in Foo<long, bool>(5, false);
+// although usually you can rely on the compiler to infer the types
+// for you automatically.  You can assign the result of expression
+// Foo(p1, ..., pk) to a variable of type FooActionPk<p1_type, ...,
+// pk_type>.  This can be useful when composing actions.
+//
+// You can also overload actions with different numbers of parameters:
+//
+//   ACTION_P(Plus, a) { ... }
+//   ACTION_P2(Plus, a, b) { ... }
+//
+// While it's tempting to always use the ACTION* macros when defining
+// a new action, you should also consider implementing ActionInterface
+// or using MakePolymorphicAction() instead, especially if you need to
+// use the action a lot.  While these approaches require more work,
+// they give you more control on the types of the mock function
+// arguments and the action parameters, which in general leads to
+// better compiler error messages that pay off in the long run.  They
+// also allow overloading actions based on parameter types (as opposed
+// to just based on the number of parameters).
+//
+// CAVEAT:
+//
+// ACTION*() can only be used in a namespace scope.  The reason is
+// that C++ doesn't yet allow function-local types to be used to
+// instantiate templates.  The upcoming C++0x standard will fix this.
+// Once that's done, we'll consider supporting using ACTION*() inside
+// a function.
+//
+// MORE INFORMATION:
+//
+// To learn more about using these macros, please search for 'ACTION'
+// on http://code.google.com/p/googlemock/wiki/CookBook.
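+//
+// A short overloading sketch (illustrative, not part of the original
+// header), assuming arg0 of the mock function is numeric:
+//
+//   ACTION_P(Plus, a) { return arg0 + a; }
+//   ACTION_P2(Plus, a, b) { return arg0 + a + b; }
+//
+//   ...WillOnce(Plus(2));      // one-parameter overload
+//   ...WillOnce(Plus(2, 3));   // two-parameter overload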
+
+// An internal macro needed for implementing ACTION*().
+#define GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_\
+    const args_type& args GTEST_ATTRIBUTE_UNUSED_, \
+    arg0_type arg0 GTEST_ATTRIBUTE_UNUSED_, \
+    arg1_type arg1 GTEST_ATTRIBUTE_UNUSED_, \
+    arg2_type arg2 GTEST_ATTRIBUTE_UNUSED_, \
+    arg3_type arg3 GTEST_ATTRIBUTE_UNUSED_, \
+    arg4_type arg4 GTEST_ATTRIBUTE_UNUSED_, \
+    arg5_type arg5 GTEST_ATTRIBUTE_UNUSED_, \
+    arg6_type arg6 GTEST_ATTRIBUTE_UNUSED_, \
+    arg7_type arg7 GTEST_ATTRIBUTE_UNUSED_, \
+    arg8_type arg8 GTEST_ATTRIBUTE_UNUSED_, \
+    arg9_type arg9 GTEST_ATTRIBUTE_UNUSED_
+
+// Sometimes you want to give an action explicit template parameters
+// that cannot be inferred from its value parameters.  ACTION() and
+// ACTION_P*() don't support that.  ACTION_TEMPLATE() remedies that
+// and can be viewed as an extension to ACTION() and ACTION_P*().
+//
+// The syntax:
+//
+//   ACTION_TEMPLATE(ActionName,
+//                   HAS_m_TEMPLATE_PARAMS(kind1, name1, ..., kind_m, name_m),
+//                   AND_n_VALUE_PARAMS(p1, ..., p_n)) { statements; }
+//
+// defines an action template that takes m explicit template
+// parameters and n value parameters.  name_i is the name of the i-th
+// template parameter, and kind_i specifies whether it's a typename,
+// an integral constant, or a template.  p_i is the name of the i-th
+// value parameter.
+//
+// Example:
+//
+//   // DuplicateArg<k, T>(output) converts the k-th argument of the mock
+//   // function to type T and copies it to *output.
+//   ACTION_TEMPLATE(DuplicateArg,
+//                   HAS_2_TEMPLATE_PARAMS(int, k, typename, T),
+//                   AND_1_VALUE_PARAMS(output)) {
+//     *output = T(::testing::get<k>(args));
+//   }
+//   ...
+//     int n;
+//     EXPECT_CALL(mock, Foo(_, _))
+//         .WillOnce(DuplicateArg<1, unsigned char>(&n));
+//
+// To create an instance of an action template, write:
+//
+//   ActionName<t1, ..., t_m>(v1, ..., v_n)
+//
+// where the ts are the template arguments and the vs are the value
+// arguments.  The value argument types are inferred by the compiler.
+// If you want to explicitly specify the value argument types, you can
+// provide additional template arguments:
+//
+//   ActionName<t1, ..., t_m, u1, ..., u_k>(v1, ..., v_n)
+//
+// where u_i is the desired type of v_i.
+//
+// ACTION_TEMPLATE and ACTION/ACTION_P* can be overloaded on the
+// number of value parameters, but not on the number of template
+// parameters.  Without the restriction, the meaning of the following
+// is unclear:
+//
+//   OverloadedAction<int, bool>(x);
+//
+// Are we using a single-template-parameter action where 'bool' refers
+// to the type of x, or are we using a two-template-parameter action
+// where the compiler is asked to infer the type of x?
+//
+// Implementation notes:
+//
+// GMOCK_INTERNAL_*_HAS_m_TEMPLATE_PARAMS and
+// GMOCK_INTERNAL_*_AND_n_VALUE_PARAMS are internal macros for
+// implementing ACTION_TEMPLATE.  The main trick we use is to create
+// new macro invocations when expanding a macro.  For example, we have
+//
+//   #define ACTION_TEMPLATE(name, template_params, value_params)
+//       ... GMOCK_INTERNAL_DECL_##template_params ...
+//
+// which causes ACTION_TEMPLATE(..., HAS_1_TEMPLATE_PARAMS(typename, T), ...)
+// to expand to
+//
+//       ... GMOCK_INTERNAL_DECL_HAS_1_TEMPLATE_PARAMS(typename, T) ...
+//
+// Since GMOCK_INTERNAL_DECL_HAS_1_TEMPLATE_PARAMS is a macro, the
+// preprocessor will continue to expand it to
+//
+//       ... typename T ...
+//
+// This technique conforms to the C++ standard and is portable.  It
+// allows us to implement action templates using O(N) code, where N is
+// the maximum number of template/value parameters supported.  Without
+// using it, we'd have to devote O(N^2) amount of code to implement all
+// combinations of m and n.
+
+// Declares the template parameters.
+#define GMOCK_INTERNAL_DECL_HAS_1_TEMPLATE_PARAMS(kind0, name0) kind0 name0
+#define GMOCK_INTERNAL_DECL_HAS_2_TEMPLATE_PARAMS(kind0, name0, kind1, \
+    name1) kind0 name0, kind1 name1
+#define GMOCK_INTERNAL_DECL_HAS_3_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \
+    kind2, name2) kind0 name0, kind1 name1, kind2 name2
+#define GMOCK_INTERNAL_DECL_HAS_4_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \
+    kind2, name2, kind3, name3) kind0 name0, kind1 name1, kind2 name2, \
+    kind3 name3
+#define GMOCK_INTERNAL_DECL_HAS_5_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \
+    kind2, name2, kind3, name3, kind4, name4) kind0 name0, kind1 name1, \
+    kind2 name2, kind3 name3, kind4 name4
+#define GMOCK_INTERNAL_DECL_HAS_6_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \
+    kind2, name2, kind3, name3, kind4, name4, kind5, name5) kind0 name0, \
+    kind1 name1, kind2 name2, kind3 name3, kind4 name4, kind5 name5
+#define GMOCK_INTERNAL_DECL_HAS_7_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \
+    kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, \
+    name6) kind0 name0, kind1 name1, kind2 name2, kind3 name3, kind4 name4, \
+    kind5 name5, kind6 name6
+#define GMOCK_INTERNAL_DECL_HAS_8_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \
+    kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, name6, \
+    kind7, name7) kind0 name0, kind1 name1, kind2 name2, kind3 name3, \
+    kind4 name4, kind5 name5, kind6 name6, kind7 name7
+#define GMOCK_INTERNAL_DECL_HAS_9_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \
+    kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, name6, \
+    kind7, name7, kind8, name8) kind0 name0, kind1 name1, kind2 name2, \
+    kind3 name3, kind4 name4, kind5 name5, kind6 name6, kind7 name7, \
+    kind8 name8
+#define GMOCK_INTERNAL_DECL_HAS_10_TEMPLATE_PARAMS(kind0, name0, kind1, \
+    name1, kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, \
+    name6, kind7, name7, kind8, name8, kind9, name9) kind0 name0, \
+    kind1 name1, kind2 name2, kind3 name3, kind4 name4, kind5 name5, \
+    kind6 name6, kind7 name7, kind8 name8, kind9 name9
+
+// Lists the template parameters.
+#define GMOCK_INTERNAL_LIST_HAS_1_TEMPLATE_PARAMS(kind0, name0) name0
+#define GMOCK_INTERNAL_LIST_HAS_2_TEMPLATE_PARAMS(kind0, name0, kind1, \
+    name1) name0, name1
+#define GMOCK_INTERNAL_LIST_HAS_3_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \
+    kind2, name2) name0, name1, name2
+#define GMOCK_INTERNAL_LIST_HAS_4_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \
+    kind2, name2, kind3, name3) name0, name1, name2, name3
+#define GMOCK_INTERNAL_LIST_HAS_5_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \
+    kind2, name2, kind3, name3, kind4, name4) name0, name1, name2, name3, \
+    name4
+#define GMOCK_INTERNAL_LIST_HAS_6_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \
+    kind2, name2, kind3, name3, kind4, name4, kind5, name5) name0, name1, \
+    name2, name3, name4, name5
+#define GMOCK_INTERNAL_LIST_HAS_7_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \
+    kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, \
+    name6) name0, name1, name2, name3, name4, name5, name6
+#define GMOCK_INTERNAL_LIST_HAS_8_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \
+    kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, name6, \
+    kind7, name7) name0, name1, name2, name3, name4, name5, name6, name7
+#define GMOCK_INTERNAL_LIST_HAS_9_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \
+    kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, name6, \
+    kind7, name7, kind8, name8) name0, name1, name2, name3, name4, name5, \
+    name6, name7, name8
+#define GMOCK_INTERNAL_LIST_HAS_10_TEMPLATE_PARAMS(kind0, name0, kind1, \
+    name1, kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, \
+    name6, kind7, name7, kind8, name8, kind9, name9) name0, name1, name2, \
+    name3, name4, name5, name6, name7, name8, name9
+
+// Declares the types of value parameters.
+#define GMOCK_INTERNAL_DECL_TYPE_AND_0_VALUE_PARAMS()
+#define GMOCK_INTERNAL_DECL_TYPE_AND_1_VALUE_PARAMS(p0) , typename p0##_type
+#define GMOCK_INTERNAL_DECL_TYPE_AND_2_VALUE_PARAMS(p0, p1) , \
+    typename p0##_type, typename p1##_type
+#define GMOCK_INTERNAL_DECL_TYPE_AND_3_VALUE_PARAMS(p0, p1, p2) , \
+    typename p0##_type, typename p1##_type, typename p2##_type
+#define GMOCK_INTERNAL_DECL_TYPE_AND_4_VALUE_PARAMS(p0, p1, p2, p3) , \
+    typename p0##_type, typename p1##_type, typename p2##_type, \
+    typename p3##_type
+#define GMOCK_INTERNAL_DECL_TYPE_AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4) , \
+    typename p0##_type, typename p1##_type, typename p2##_type, \
+    typename p3##_type, typename p4##_type
+#define GMOCK_INTERNAL_DECL_TYPE_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5) , \
+    typename p0##_type, typename p1##_type, typename p2##_type, \
+    typename p3##_type, typename p4##_type, typename p5##_type
+#define GMOCK_INTERNAL_DECL_TYPE_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \
+    p6) , typename p0##_type, typename p1##_type, typename p2##_type, \
+    typename p3##_type, typename p4##_type, typename p5##_type, \
+    typename p6##_type
+#define GMOCK_INTERNAL_DECL_TYPE_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \
+    p6, p7) , typename p0##_type, typename p1##_type, typename p2##_type, \
+    typename p3##_type, typename p4##_type, typename p5##_type, \
+    typename p6##_type, typename p7##_type
+#define GMOCK_INTERNAL_DECL_TYPE_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \
+    p6, p7, p8) , typename p0##_type, typename p1##_type, typename p2##_type, \
+    typename p3##_type, typename p4##_type, typename p5##_type, \
+    typename p6##_type, typename p7##_type, typename p8##_type
+#define GMOCK_INTERNAL_DECL_TYPE_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \
+    p6, p7, p8, p9) , typename p0##_type, typename p1##_type, \
+    typename p2##_type, typename p3##_type, typename p4##_type, \
+    typename p5##_type, typename p6##_type, typename p7##_type, \
+    typename p8##_type, typename p9##_type
+
+// Initializes the value parameters.
+#define GMOCK_INTERNAL_INIT_AND_0_VALUE_PARAMS()\
+    ()
+#define GMOCK_INTERNAL_INIT_AND_1_VALUE_PARAMS(p0)\
+    (p0##_type gmock_p0) : p0(gmock_p0)
+#define GMOCK_INTERNAL_INIT_AND_2_VALUE_PARAMS(p0, p1)\
+    (p0##_type gmock_p0, p1##_type gmock_p1) : p0(gmock_p0), p1(gmock_p1)
+#define GMOCK_INTERNAL_INIT_AND_3_VALUE_PARAMS(p0, p1, p2)\
+    (p0##_type gmock_p0, p1##_type gmock_p1, \
+        p2##_type gmock_p2) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2)
+#define GMOCK_INTERNAL_INIT_AND_4_VALUE_PARAMS(p0, p1, p2, p3)\
+    (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+        p3##_type gmock_p3) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \
+        p3(gmock_p3)
+#define GMOCK_INTERNAL_INIT_AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4)\
+    (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+        p3##_type gmock_p3, p4##_type gmock_p4) : p0(gmock_p0), p1(gmock_p1), \
+        p2(gmock_p2), p3(gmock_p3), p4(gmock_p4)
+#define GMOCK_INTERNAL_INIT_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5)\
+    (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+        p3##_type gmock_p3, p4##_type gmock_p4, \
+        p5##_type gmock_p5) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \
+        p3(gmock_p3), p4(gmock_p4), p5(gmock_p5)
+#define GMOCK_INTERNAL_INIT_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6)\
+    (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+        p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \
+        p6##_type gmock_p6) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \
+        p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6)
+#define GMOCK_INTERNAL_INIT_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7)\
+    (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+        p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \
+        p6##_type gmock_p6, p7##_type gmock_p7) : p0(gmock_p0), p1(gmock_p1), \
+        p2(gmock_p2), p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), \
+        p7(gmock_p7)
+#define GMOCK_INTERNAL_INIT_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \
+    p7, p8)\
+    (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+        p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \
+        p6##_type gmock_p6, p7##_type gmock_p7, \
+        p8##_type gmock_p8) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \
+        p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), p7(gmock_p7), \
+        p8(gmock_p8)
+#define GMOCK_INTERNAL_INIT_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \
+    p7, p8, p9)\
+    (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+        p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \
+        p6##_type gmock_p6, p7##_type gmock_p7, p8##_type gmock_p8, \
+        p9##_type gmock_p9) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \
+        p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), p7(gmock_p7), \
+        p8(gmock_p8), p9(gmock_p9)
+
+// Declares the fields for storing the value parameters.
+#define GMOCK_INTERNAL_DEFN_AND_0_VALUE_PARAMS()
+#define GMOCK_INTERNAL_DEFN_AND_1_VALUE_PARAMS(p0) p0##_type p0;
+#define GMOCK_INTERNAL_DEFN_AND_2_VALUE_PARAMS(p0, p1) p0##_type p0; \
+    p1##_type p1;
+#define GMOCK_INTERNAL_DEFN_AND_3_VALUE_PARAMS(p0, p1, p2) p0##_type p0; \
+    p1##_type p1; p2##_type p2;
+#define GMOCK_INTERNAL_DEFN_AND_4_VALUE_PARAMS(p0, p1, p2, p3) p0##_type p0; \
+    p1##_type p1; p2##_type p2; p3##_type p3;
+#define GMOCK_INTERNAL_DEFN_AND_5_VALUE_PARAMS(p0, p1, p2, p3, \
+    p4) p0##_type p0; p1##_type p1; p2##_type p2; p3##_type p3; p4##_type p4;
+#define GMOCK_INTERNAL_DEFN_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, \
+    p5) p0##_type p0; p1##_type p1; p2##_type p2; p3##_type p3; p4##_type p4; \
+    p5##_type p5;
+#define GMOCK_INTERNAL_DEFN_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \
+    p6) p0##_type p0; p1##_type p1; p2##_type p2; p3##_type p3; p4##_type p4; \
+    p5##_type p5; p6##_type p6;
+#define GMOCK_INTERNAL_DEFN_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \
+    p7) p0##_type p0; p1##_type p1; p2##_type p2; p3##_type p3; p4##_type p4; \
+    p5##_type p5; p6##_type p6; p7##_type p7;
+#define GMOCK_INTERNAL_DEFN_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \
+    p7, p8) p0##_type p0; p1##_type p1; p2##_type p2; p3##_type p3; \
+    p4##_type p4; p5##_type p5; p6##_type p6; p7##_type p7; p8##_type p8;
+#define GMOCK_INTERNAL_DEFN_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \
+    p7, p8, p9) p0##_type p0; p1##_type p1; p2##_type p2; p3##_type p3; \
+    p4##_type p4; p5##_type p5; p6##_type p6; p7##_type p7; p8##_type p8; \
+    p9##_type p9;
+
+// Lists the value parameters.
+#define GMOCK_INTERNAL_LIST_AND_0_VALUE_PARAMS()
+#define GMOCK_INTERNAL_LIST_AND_1_VALUE_PARAMS(p0) p0
+#define GMOCK_INTERNAL_LIST_AND_2_VALUE_PARAMS(p0, p1) p0, p1
+#define GMOCK_INTERNAL_LIST_AND_3_VALUE_PARAMS(p0, p1, p2) p0, p1, p2
+#define GMOCK_INTERNAL_LIST_AND_4_VALUE_PARAMS(p0, p1, p2, p3) p0, p1, p2, p3
+#define GMOCK_INTERNAL_LIST_AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4) p0, p1, \
+    p2, p3, p4
+#define GMOCK_INTERNAL_LIST_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5) p0, \
+    p1, p2, p3, p4, p5
+#define GMOCK_INTERNAL_LIST_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \
+    p6) p0, p1, p2, p3, p4, p5, p6
+#define GMOCK_INTERNAL_LIST_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \
+    p7) p0, p1, p2, p3, p4, p5, p6, p7
+#define GMOCK_INTERNAL_LIST_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \
+    p7, p8) p0, p1, p2, p3, p4, p5, p6, p7, p8
+#define GMOCK_INTERNAL_LIST_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \
+    p7, p8, p9) p0, p1, p2, p3, p4, p5, p6, p7, p8, p9
+
+// Lists the value parameter types.
+#define GMOCK_INTERNAL_LIST_TYPE_AND_0_VALUE_PARAMS()
+#define GMOCK_INTERNAL_LIST_TYPE_AND_1_VALUE_PARAMS(p0) , p0##_type
+#define GMOCK_INTERNAL_LIST_TYPE_AND_2_VALUE_PARAMS(p0, p1) , p0##_type, \
+    p1##_type
+#define GMOCK_INTERNAL_LIST_TYPE_AND_3_VALUE_PARAMS(p0, p1, p2) , p0##_type, \
+    p1##_type, p2##_type
+#define GMOCK_INTERNAL_LIST_TYPE_AND_4_VALUE_PARAMS(p0, p1, p2, p3) , \
+    p0##_type, p1##_type, p2##_type, p3##_type
+#define GMOCK_INTERNAL_LIST_TYPE_AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4) , \
+    p0##_type, p1##_type, p2##_type, p3##_type, p4##_type
+#define GMOCK_INTERNAL_LIST_TYPE_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5) , \
+    p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, p5##_type
+#define GMOCK_INTERNAL_LIST_TYPE_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \
+    p6) , p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, p5##_type, \
+    p6##_type
+#define GMOCK_INTERNAL_LIST_TYPE_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \
+    p6, p7) , p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, \
+    p5##_type, p6##_type, p7##_type
+#define GMOCK_INTERNAL_LIST_TYPE_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \
+    p6, p7, p8) , p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, \
+    p5##_type, p6##_type, p7##_type, p8##_type
+#define GMOCK_INTERNAL_LIST_TYPE_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \
+    p6, p7, p8, p9) , p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, \
+    p5##_type, p6##_type, p7##_type, p8##_type, p9##_type
+
+// Declares the value parameters.
+#define GMOCK_INTERNAL_DECL_AND_0_VALUE_PARAMS()
+#define GMOCK_INTERNAL_DECL_AND_1_VALUE_PARAMS(p0) p0##_type p0
+#define GMOCK_INTERNAL_DECL_AND_2_VALUE_PARAMS(p0, p1) p0##_type p0, \
+    p1##_type p1
+#define GMOCK_INTERNAL_DECL_AND_3_VALUE_PARAMS(p0, p1, p2) p0##_type p0, \
+    p1##_type p1, p2##_type p2
+#define GMOCK_INTERNAL_DECL_AND_4_VALUE_PARAMS(p0, p1, p2, p3) p0##_type p0, \
+    p1##_type p1, p2##_type p2, p3##_type p3
+#define GMOCK_INTERNAL_DECL_AND_5_VALUE_PARAMS(p0, p1, p2, p3, \
+    p4) p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4
+#define GMOCK_INTERNAL_DECL_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, \
+    p5) p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4, \
+    p5##_type p5
+#define GMOCK_INTERNAL_DECL_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \
+    p6) p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4, \
+    p5##_type p5, p6##_type p6
+#define GMOCK_INTERNAL_DECL_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \
+    p7) p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4, \
+    p5##_type p5, p6##_type p6, p7##_type p7
+#define GMOCK_INTERNAL_DECL_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \
+    p7, p8) p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, \
+    p4##_type p4, p5##_type p5, p6##_type p6, p7##_type p7, p8##_type p8
+#define GMOCK_INTERNAL_DECL_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \
+    p7, p8, p9) p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, \
+    p4##_type p4, p5##_type p5, p6##_type p6, p7##_type p7, p8##_type p8, \
+    p9##_type p9
+
+// The suffix of the class template implementing the action template.
+#define GMOCK_INTERNAL_COUNT_AND_0_VALUE_PARAMS()
+#define GMOCK_INTERNAL_COUNT_AND_1_VALUE_PARAMS(p0) P
+#define GMOCK_INTERNAL_COUNT_AND_2_VALUE_PARAMS(p0, p1) P2
+#define GMOCK_INTERNAL_COUNT_AND_3_VALUE_PARAMS(p0, p1, p2) P3
+#define GMOCK_INTERNAL_COUNT_AND_4_VALUE_PARAMS(p0, p1, p2, p3) P4
+#define GMOCK_INTERNAL_COUNT_AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4) P5
+#define GMOCK_INTERNAL_COUNT_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5) P6
+#define GMOCK_INTERNAL_COUNT_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6) P7
+#define GMOCK_INTERNAL_COUNT_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \
+    p7) P8
+#define GMOCK_INTERNAL_COUNT_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \
+    p7, p8) P9
+#define GMOCK_INTERNAL_COUNT_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \
+    p7, p8, p9) P10
+
+// The name of the class template implementing the action template.
+#define GMOCK_ACTION_CLASS_(name, value_params)\
+    GTEST_CONCAT_TOKEN_(name##Action, GMOCK_INTERNAL_COUNT_##value_params)
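+// For illustration (Foo is a hypothetical action name):
+// GMOCK_ACTION_CLASS_(Foo, AND_2_VALUE_PARAMS(a, b)) token-pastes
+// FooAction together with P2 (from the COUNT macros above), yielding
+// the class template name FooActionP2.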
+
+#define ACTION_TEMPLATE(name, template_params, value_params)\
+  template <GMOCK_INTERNAL_DECL_##template_params\
+            GMOCK_INTERNAL_DECL_TYPE_##value_params>\
+  class GMOCK_ACTION_CLASS_(name, value_params) {\
+   public:\
+    explicit GMOCK_ACTION_CLASS_(name, value_params)\
+        GMOCK_INTERNAL_INIT_##value_params {}\
+    template <typename F>\
+    class gmock_Impl : public ::testing::ActionInterface<F> {\
+     public:\
+      typedef F function_type;\
+      typedef typename ::testing::internal::Function<F>::Result return_type;\
+      typedef typename ::testing::internal::Function<F>::ArgumentTuple\
+          args_type;\
+      explicit gmock_Impl GMOCK_INTERNAL_INIT_##value_params {}\
+      virtual return_type Perform(const args_type& args) {\
+        return ::testing::internal::ActionHelper<return_type, gmock_Impl>::\
+            Perform(this, args);\
+      }\
+      template <typename arg0_type, typename arg1_type, typename arg2_type, \
+          typename arg3_type, typename arg4_type, typename arg5_type, \
+          typename arg6_type, typename arg7_type, typename arg8_type, \
+          typename arg9_type>\
+      return_type gmock_PerformImpl(const args_type& args, arg0_type arg0, \
+          arg1_type arg1, arg2_type arg2, arg3_type arg3, arg4_type arg4, \
+          arg5_type arg5, arg6_type arg6, arg7_type arg7, arg8_type arg8, \
+          arg9_type arg9) const;\
+      GMOCK_INTERNAL_DEFN_##value_params\
+     private:\
+      GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+    };\
+    template <typename F> operator ::testing::Action<F>() const {\
+      return ::testing::Action<F>(\
+          new gmock_Impl<F>(GMOCK_INTERNAL_LIST_##value_params));\
+    }\
+    GMOCK_INTERNAL_DEFN_##value_params\
+   private:\
+    GTEST_DISALLOW_ASSIGN_(GMOCK_ACTION_CLASS_(name, value_params));\
+  };\
+  template <GMOCK_INTERNAL_DECL_##template_params\
+            GMOCK_INTERNAL_DECL_TYPE_##value_params>\
+  inline GMOCK_ACTION_CLASS_(name, value_params)<\
+      GMOCK_INTERNAL_LIST_##template_params\
+      GMOCK_INTERNAL_LIST_TYPE_##value_params> name(\
+          GMOCK_INTERNAL_DECL_##value_params) {\
+    return GMOCK_ACTION_CLASS_(name, value_params)<\
+        GMOCK_INTERNAL_LIST_##template_params\
+        GMOCK_INTERNAL_LIST_TYPE_##value_params>(\
+            GMOCK_INTERNAL_LIST_##value_params);\
+  }\
+  template <GMOCK_INTERNAL_DECL_##template_params\
+            GMOCK_INTERNAL_DECL_TYPE_##value_params>\
+  template <typename F>\
+  template <typename arg0_type, typename arg1_type, typename arg2_type, \
+      typename arg3_type, typename arg4_type, typename arg5_type, \
+      typename arg6_type, typename arg7_type, typename arg8_type, \
+      typename arg9_type>\
+  typename ::testing::internal::Function<F>::Result\
+      GMOCK_ACTION_CLASS_(name, value_params)<\
+          GMOCK_INTERNAL_LIST_##template_params\
+          GMOCK_INTERNAL_LIST_TYPE_##value_params>::gmock_Impl<F>::\
+              gmock_PerformImpl(\
+          GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const
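+
+// A usage sketch of ACTION_TEMPLATE (ReturnKthArg and its parameters are
+// illustrative, not part of this header):
+//
+//   ACTION_TEMPLATE(ReturnKthArg,
+//                   HAS_1_TEMPLATE_PARAMS(int, k),
+//                   AND_0_VALUE_PARAMS()) {
+//     return ::testing::get<k>(args);
+//   }
+//
+// which can then be used as .WillOnce(ReturnKthArg<1>()).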
+
+#define ACTION(name)\
+  class name##Action {\
+   public:\
+    name##Action() {}\
+    template <typename F>\
+    class gmock_Impl : public ::testing::ActionInterface<F> {\
+     public:\
+      typedef F function_type;\
+      typedef typename ::testing::internal::Function<F>::Result return_type;\
+      typedef typename ::testing::internal::Function<F>::ArgumentTuple\
+          args_type;\
+      gmock_Impl() {}\
+      virtual return_type Perform(const args_type& args) {\
+        return ::testing::internal::ActionHelper<return_type, gmock_Impl>::\
+            Perform(this, args);\
+      }\
+      template <typename arg0_type, typename arg1_type, typename arg2_type, \
+          typename arg3_type, typename arg4_type, typename arg5_type, \
+          typename arg6_type, typename arg7_type, typename arg8_type, \
+          typename arg9_type>\
+      return_type gmock_PerformImpl(const args_type& args, arg0_type arg0, \
+          arg1_type arg1, arg2_type arg2, arg3_type arg3, arg4_type arg4, \
+          arg5_type arg5, arg6_type arg6, arg7_type arg7, arg8_type arg8, \
+          arg9_type arg9) const;\
+     private:\
+      GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+    };\
+    template <typename F> operator ::testing::Action<F>() const {\
+      return ::testing::Action<F>(new gmock_Impl<F>());\
+    }\
+   private:\
+    GTEST_DISALLOW_ASSIGN_(name##Action);\
+  };\
+  inline name##Action name() {\
+    return name##Action();\
+  }\
+  template <typename F>\
+  template <typename arg0_type, typename arg1_type, typename arg2_type, \
+      typename arg3_type, typename arg4_type, typename arg5_type, \
+      typename arg6_type, typename arg7_type, typename arg8_type, \
+      typename arg9_type>\
+  typename ::testing::internal::Function<F>::Result\
+      name##Action::gmock_Impl<F>::gmock_PerformImpl(\
+          GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const
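+
+// Illustrative use of ACTION (ReturnZero is a hypothetical name):
+//
+//   ACTION(ReturnZero) { return 0; }
+//
+// defines a parameterless action usable as .WillOnce(ReturnZero()); inside
+// the body the mock function's arguments are available as arg0, ..., arg9
+// and as the tuple args.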
+
+#define ACTION_P(name, p0)\
+  template <typename p0##_type>\
+  class name##ActionP {\
+   public:\
+    explicit name##ActionP(p0##_type gmock_p0) : p0(gmock_p0) {}\
+    template <typename F>\
+    class gmock_Impl : public ::testing::ActionInterface<F> {\
+     public:\
+      typedef F function_type;\
+      typedef typename ::testing::internal::Function<F>::Result return_type;\
+      typedef typename ::testing::internal::Function<F>::ArgumentTuple\
+          args_type;\
+      explicit gmock_Impl(p0##_type gmock_p0) : p0(gmock_p0) {}\
+      virtual return_type Perform(const args_type& args) {\
+        return ::testing::internal::ActionHelper<return_type, gmock_Impl>::\
+            Perform(this, args);\
+      }\
+      template <typename arg0_type, typename arg1_type, typename arg2_type, \
+          typename arg3_type, typename arg4_type, typename arg5_type, \
+          typename arg6_type, typename arg7_type, typename arg8_type, \
+          typename arg9_type>\
+      return_type gmock_PerformImpl(const args_type& args, arg0_type arg0, \
+          arg1_type arg1, arg2_type arg2, arg3_type arg3, arg4_type arg4, \
+          arg5_type arg5, arg6_type arg6, arg7_type arg7, arg8_type arg8, \
+          arg9_type arg9) const;\
+      p0##_type p0;\
+     private:\
+      GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+    };\
+    template <typename F> operator ::testing::Action<F>() const {\
+      return ::testing::Action<F>(new gmock_Impl<F>(p0));\
+    }\
+    p0##_type p0;\
+   private:\
+    GTEST_DISALLOW_ASSIGN_(name##ActionP);\
+  };\
+  template <typename p0##_type>\
+  inline name##ActionP<p0##_type> name(p0##_type p0) {\
+    return name##ActionP<p0##_type>(p0);\
+  }\
+  template <typename p0##_type>\
+  template <typename F>\
+  template <typename arg0_type, typename arg1_type, typename arg2_type, \
+      typename arg3_type, typename arg4_type, typename arg5_type, \
+      typename arg6_type, typename arg7_type, typename arg8_type, \
+      typename arg9_type>\
+  typename ::testing::internal::Function<F>::Result\
+      name##ActionP<p0##_type>::gmock_Impl<F>::gmock_PerformImpl(\
+          GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const
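+
+// Illustrative use of ACTION_P (AddTo and n are hypothetical):
+//
+//   ACTION_P(AddTo, n) { return arg0 + n; }
+//
+// stores n by value in the generated AddToActionP class and is used as
+// .WillOnce(AddTo(5)).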
+
+#define ACTION_P2(name, p0, p1)\
+  template <typename p0##_type, typename p1##_type>\
+  class name##ActionP2 {\
+   public:\
+    name##ActionP2(p0##_type gmock_p0, p1##_type gmock_p1) : p0(gmock_p0), \
+        p1(gmock_p1) {}\
+    template <typename F>\
+    class gmock_Impl : public ::testing::ActionInterface<F> {\
+     public:\
+      typedef F function_type;\
+      typedef typename ::testing::internal::Function<F>::Result return_type;\
+      typedef typename ::testing::internal::Function<F>::ArgumentTuple\
+          args_type;\
+      gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1) : p0(gmock_p0), \
+          p1(gmock_p1) {}\
+      virtual return_type Perform(const args_type& args) {\
+        return ::testing::internal::ActionHelper<return_type, gmock_Impl>::\
+            Perform(this, args);\
+      }\
+      template <typename arg0_type, typename arg1_type, typename arg2_type, \
+          typename arg3_type, typename arg4_type, typename arg5_type, \
+          typename arg6_type, typename arg7_type, typename arg8_type, \
+          typename arg9_type>\
+      return_type gmock_PerformImpl(const args_type& args, arg0_type arg0, \
+          arg1_type arg1, arg2_type arg2, arg3_type arg3, arg4_type arg4, \
+          arg5_type arg5, arg6_type arg6, arg7_type arg7, arg8_type arg8, \
+          arg9_type arg9) const;\
+      p0##_type p0;\
+      p1##_type p1;\
+     private:\
+      GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+    };\
+    template <typename F> operator ::testing::Action<F>() const {\
+      return ::testing::Action<F>(new gmock_Impl<F>(p0, p1));\
+    }\
+    p0##_type p0;\
+    p1##_type p1;\
+   private:\
+    GTEST_DISALLOW_ASSIGN_(name##ActionP2);\
+  };\
+  template <typename p0##_type, typename p1##_type>\
+  inline name##ActionP2<p0##_type, p1##_type> name(p0##_type p0, \
+      p1##_type p1) {\
+    return name##ActionP2<p0##_type, p1##_type>(p0, p1);\
+  }\
+  template <typename p0##_type, typename p1##_type>\
+  template <typename F>\
+  template <typename arg0_type, typename arg1_type, typename arg2_type, \
+      typename arg3_type, typename arg4_type, typename arg5_type, \
+      typename arg6_type, typename arg7_type, typename arg8_type, \
+      typename arg9_type>\
+  typename ::testing::internal::Function<F>::Result\
+      name##ActionP2<p0##_type, p1##_type>::gmock_Impl<F>::gmock_PerformImpl(\
+          GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const
+
+#define ACTION_P3(name, p0, p1, p2)\
+  template <typename p0##_type, typename p1##_type, typename p2##_type>\
+  class name##ActionP3 {\
+   public:\
+    name##ActionP3(p0##_type gmock_p0, p1##_type gmock_p1, \
+        p2##_type gmock_p2) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2) {}\
+    template <typename F>\
+    class gmock_Impl : public ::testing::ActionInterface<F> {\
+     public:\
+      typedef F function_type;\
+      typedef typename ::testing::internal::Function<F>::Result return_type;\
+      typedef typename ::testing::internal::Function<F>::ArgumentTuple\
+          args_type;\
+      gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, \
+          p2##_type gmock_p2) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2) {}\
+      virtual return_type Perform(const args_type& args) {\
+        return ::testing::internal::ActionHelper<return_type, gmock_Impl>::\
+            Perform(this, args);\
+      }\
+      template <typename arg0_type, typename arg1_type, typename arg2_type, \
+          typename arg3_type, typename arg4_type, typename arg5_type, \
+          typename arg6_type, typename arg7_type, typename arg8_type, \
+          typename arg9_type>\
+      return_type gmock_PerformImpl(const args_type& args, arg0_type arg0, \
+          arg1_type arg1, arg2_type arg2, arg3_type arg3, arg4_type arg4, \
+          arg5_type arg5, arg6_type arg6, arg7_type arg7, arg8_type arg8, \
+          arg9_type arg9) const;\
+      p0##_type p0;\
+      p1##_type p1;\
+      p2##_type p2;\
+     private:\
+      GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+    };\
+    template <typename F> operator ::testing::Action<F>() const {\
+      return ::testing::Action<F>(new gmock_Impl<F>(p0, p1, p2));\
+    }\
+    p0##_type p0;\
+    p1##_type p1;\
+    p2##_type p2;\
+   private:\
+    GTEST_DISALLOW_ASSIGN_(name##ActionP3);\
+  };\
+  template <typename p0##_type, typename p1##_type, typename p2##_type>\
+  inline name##ActionP3<p0##_type, p1##_type, p2##_type> name(p0##_type p0, \
+      p1##_type p1, p2##_type p2) {\
+    return name##ActionP3<p0##_type, p1##_type, p2##_type>(p0, p1, p2);\
+  }\
+  template <typename p0##_type, typename p1##_type, typename p2##_type>\
+  template <typename F>\
+  template <typename arg0_type, typename arg1_type, typename arg2_type, \
+      typename arg3_type, typename arg4_type, typename arg5_type, \
+      typename arg6_type, typename arg7_type, typename arg8_type, \
+      typename arg9_type>\
+  typename ::testing::internal::Function<F>::Result\
+      name##ActionP3<p0##_type, p1##_type, \
+          p2##_type>::gmock_Impl<F>::gmock_PerformImpl(\
+          GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const
+
+#define ACTION_P4(name, p0, p1, p2, p3)\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type>\
+  class name##ActionP4 {\
+   public:\
+    name##ActionP4(p0##_type gmock_p0, p1##_type gmock_p1, \
+        p2##_type gmock_p2, p3##_type gmock_p3) : p0(gmock_p0), p1(gmock_p1), \
+        p2(gmock_p2), p3(gmock_p3) {}\
+    template <typename F>\
+    class gmock_Impl : public ::testing::ActionInterface<F> {\
+     public:\
+      typedef F function_type;\
+      typedef typename ::testing::internal::Function<F>::Result return_type;\
+      typedef typename ::testing::internal::Function<F>::ArgumentTuple\
+          args_type;\
+      gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+          p3##_type gmock_p3) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \
+          p3(gmock_p3) {}\
+      virtual return_type Perform(const args_type& args) {\
+        return ::testing::internal::ActionHelper<return_type, gmock_Impl>::\
+            Perform(this, args);\
+      }\
+      template <typename arg0_type, typename arg1_type, typename arg2_type, \
+          typename arg3_type, typename arg4_type, typename arg5_type, \
+          typename arg6_type, typename arg7_type, typename arg8_type, \
+          typename arg9_type>\
+      return_type gmock_PerformImpl(const args_type& args, arg0_type arg0, \
+          arg1_type arg1, arg2_type arg2, arg3_type arg3, arg4_type arg4, \
+          arg5_type arg5, arg6_type arg6, arg7_type arg7, arg8_type arg8, \
+          arg9_type arg9) const;\
+      p0##_type p0;\
+      p1##_type p1;\
+      p2##_type p2;\
+      p3##_type p3;\
+     private:\
+      GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+    };\
+    template <typename F> operator ::testing::Action<F>() const {\
+      return ::testing::Action<F>(new gmock_Impl<F>(p0, p1, p2, p3));\
+    }\
+    p0##_type p0;\
+    p1##_type p1;\
+    p2##_type p2;\
+    p3##_type p3;\
+   private:\
+    GTEST_DISALLOW_ASSIGN_(name##ActionP4);\
+  };\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type>\
+  inline name##ActionP4<p0##_type, p1##_type, p2##_type, \
+      p3##_type> name(p0##_type p0, p1##_type p1, p2##_type p2, \
+      p3##_type p3) {\
+    return name##ActionP4<p0##_type, p1##_type, p2##_type, p3##_type>(p0, p1, \
+        p2, p3);\
+  }\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type>\
+  template <typename F>\
+  template <typename arg0_type, typename arg1_type, typename arg2_type, \
+      typename arg3_type, typename arg4_type, typename arg5_type, \
+      typename arg6_type, typename arg7_type, typename arg8_type, \
+      typename arg9_type>\
+  typename ::testing::internal::Function<F>::Result\
+      name##ActionP4<p0##_type, p1##_type, p2##_type, \
+          p3##_type>::gmock_Impl<F>::gmock_PerformImpl(\
+          GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const
+
+#define ACTION_P5(name, p0, p1, p2, p3, p4)\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type, typename p4##_type>\
+  class name##ActionP5 {\
+   public:\
+    name##ActionP5(p0##_type gmock_p0, p1##_type gmock_p1, \
+        p2##_type gmock_p2, p3##_type gmock_p3, \
+        p4##_type gmock_p4) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \
+        p3(gmock_p3), p4(gmock_p4) {}\
+    template <typename F>\
+    class gmock_Impl : public ::testing::ActionInterface<F> {\
+     public:\
+      typedef F function_type;\
+      typedef typename ::testing::internal::Function<F>::Result return_type;\
+      typedef typename ::testing::internal::Function<F>::ArgumentTuple\
+          args_type;\
+      gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+          p3##_type gmock_p3, p4##_type gmock_p4) : p0(gmock_p0), \
+          p1(gmock_p1), p2(gmock_p2), p3(gmock_p3), p4(gmock_p4) {}\
+      virtual return_type Perform(const args_type& args) {\
+        return ::testing::internal::ActionHelper<return_type, gmock_Impl>::\
+            Perform(this, args);\
+      }\
+      template <typename arg0_type, typename arg1_type, typename arg2_type, \
+          typename arg3_type, typename arg4_type, typename arg5_type, \
+          typename arg6_type, typename arg7_type, typename arg8_type, \
+          typename arg9_type>\
+      return_type gmock_PerformImpl(const args_type& args, arg0_type arg0, \
+          arg1_type arg1, arg2_type arg2, arg3_type arg3, arg4_type arg4, \
+          arg5_type arg5, arg6_type arg6, arg7_type arg7, arg8_type arg8, \
+          arg9_type arg9) const;\
+      p0##_type p0;\
+      p1##_type p1;\
+      p2##_type p2;\
+      p3##_type p3;\
+      p4##_type p4;\
+     private:\
+      GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+    };\
+    template <typename F> operator ::testing::Action<F>() const {\
+      return ::testing::Action<F>(new gmock_Impl<F>(p0, p1, p2, p3, p4));\
+    }\
+    p0##_type p0;\
+    p1##_type p1;\
+    p2##_type p2;\
+    p3##_type p3;\
+    p4##_type p4;\
+   private:\
+    GTEST_DISALLOW_ASSIGN_(name##ActionP5);\
+  };\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type, typename p4##_type>\
+  inline name##ActionP5<p0##_type, p1##_type, p2##_type, p3##_type, \
+      p4##_type> name(p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, \
+      p4##_type p4) {\
+    return name##ActionP5<p0##_type, p1##_type, p2##_type, p3##_type, \
+        p4##_type>(p0, p1, p2, p3, p4);\
+  }\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type, typename p4##_type>\
+  template <typename F>\
+  template <typename arg0_type, typename arg1_type, typename arg2_type, \
+      typename arg3_type, typename arg4_type, typename arg5_type, \
+      typename arg6_type, typename arg7_type, typename arg8_type, \
+      typename arg9_type>\
+  typename ::testing::internal::Function<F>::Result\
+      name##ActionP5<p0##_type, p1##_type, p2##_type, p3##_type, \
+          p4##_type>::gmock_Impl<F>::gmock_PerformImpl(\
+          GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const
+
+#define ACTION_P6(name, p0, p1, p2, p3, p4, p5)\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type, typename p4##_type, typename p5##_type>\
+  class name##ActionP6 {\
+   public:\
+    name##ActionP6(p0##_type gmock_p0, p1##_type gmock_p1, \
+        p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \
+        p5##_type gmock_p5) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \
+        p3(gmock_p3), p4(gmock_p4), p5(gmock_p5) {}\
+    template <typename F>\
+    class gmock_Impl : public ::testing::ActionInterface<F> {\
+     public:\
+      typedef F function_type;\
+      typedef typename ::testing::internal::Function<F>::Result return_type;\
+      typedef typename ::testing::internal::Function<F>::ArgumentTuple\
+          args_type;\
+      gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+          p3##_type gmock_p3, p4##_type gmock_p4, \
+          p5##_type gmock_p5) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \
+          p3(gmock_p3), p4(gmock_p4), p5(gmock_p5) {}\
+      virtual return_type Perform(const args_type& args) {\
+        return ::testing::internal::ActionHelper<return_type, gmock_Impl>::\
+            Perform(this, args);\
+      }\
+      template <typename arg0_type, typename arg1_type, typename arg2_type, \
+          typename arg3_type, typename arg4_type, typename arg5_type, \
+          typename arg6_type, typename arg7_type, typename arg8_type, \
+          typename arg9_type>\
+      return_type gmock_PerformImpl(const args_type& args, arg0_type arg0, \
+          arg1_type arg1, arg2_type arg2, arg3_type arg3, arg4_type arg4, \
+          arg5_type arg5, arg6_type arg6, arg7_type arg7, arg8_type arg8, \
+          arg9_type arg9) const;\
+      p0##_type p0;\
+      p1##_type p1;\
+      p2##_type p2;\
+      p3##_type p3;\
+      p4##_type p4;\
+      p5##_type p5;\
+     private:\
+      GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+    };\
+    template <typename F> operator ::testing::Action<F>() const {\
+      return ::testing::Action<F>(new gmock_Impl<F>(p0, p1, p2, p3, p4, p5));\
+    }\
+    p0##_type p0;\
+    p1##_type p1;\
+    p2##_type p2;\
+    p3##_type p3;\
+    p4##_type p4;\
+    p5##_type p5;\
+   private:\
+    GTEST_DISALLOW_ASSIGN_(name##ActionP6);\
+  };\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type, typename p4##_type, typename p5##_type>\
+  inline name##ActionP6<p0##_type, p1##_type, p2##_type, p3##_type, \
+      p4##_type, p5##_type> name(p0##_type p0, p1##_type p1, p2##_type p2, \
+      p3##_type p3, p4##_type p4, p5##_type p5) {\
+    return name##ActionP6<p0##_type, p1##_type, p2##_type, p3##_type, \
+        p4##_type, p5##_type>(p0, p1, p2, p3, p4, p5);\
+  }\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type, typename p4##_type, typename p5##_type>\
+  template <typename F>\
+  template <typename arg0_type, typename arg1_type, typename arg2_type, \
+      typename arg3_type, typename arg4_type, typename arg5_type, \
+      typename arg6_type, typename arg7_type, typename arg8_type, \
+      typename arg9_type>\
+  typename ::testing::internal::Function<F>::Result\
+      name##ActionP6<p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, \
+          p5##_type>::gmock_Impl<F>::gmock_PerformImpl(\
+          GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const
+
+#define ACTION_P7(name, p0, p1, p2, p3, p4, p5, p6)\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type, typename p4##_type, typename p5##_type, \
+      typename p6##_type>\
+  class name##ActionP7 {\
+   public:\
+    name##ActionP7(p0##_type gmock_p0, p1##_type gmock_p1, \
+        p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \
+        p5##_type gmock_p5, p6##_type gmock_p6) : p0(gmock_p0), p1(gmock_p1), \
+        p2(gmock_p2), p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), \
+        p6(gmock_p6) {}\
+    template <typename F>\
+    class gmock_Impl : public ::testing::ActionInterface<F> {\
+     public:\
+      typedef F function_type;\
+      typedef typename ::testing::internal::Function<F>::Result return_type;\
+      typedef typename ::testing::internal::Function<F>::ArgumentTuple\
+          args_type;\
+      gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+          p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \
+          p6##_type gmock_p6) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \
+          p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6) {}\
+      virtual return_type Perform(const args_type& args) {\
+        return ::testing::internal::ActionHelper<return_type, gmock_Impl>::\
+            Perform(this, args);\
+      }\
+      template <typename arg0_type, typename arg1_type, typename arg2_type, \
+          typename arg3_type, typename arg4_type, typename arg5_type, \
+          typename arg6_type, typename arg7_type, typename arg8_type, \
+          typename arg9_type>\
+      return_type gmock_PerformImpl(const args_type& args, arg0_type arg0, \
+          arg1_type arg1, arg2_type arg2, arg3_type arg3, arg4_type arg4, \
+          arg5_type arg5, arg6_type arg6, arg7_type arg7, arg8_type arg8, \
+          arg9_type arg9) const;\
+      p0##_type p0;\
+      p1##_type p1;\
+      p2##_type p2;\
+      p3##_type p3;\
+      p4##_type p4;\
+      p5##_type p5;\
+      p6##_type p6;\
+     private:\
+      GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+    };\
+    template <typename F> operator ::testing::Action<F>() const {\
+      return ::testing::Action<F>(new gmock_Impl<F>(p0, p1, p2, p3, p4, p5, \
+          p6));\
+    }\
+    p0##_type p0;\
+    p1##_type p1;\
+    p2##_type p2;\
+    p3##_type p3;\
+    p4##_type p4;\
+    p5##_type p5;\
+    p6##_type p6;\
+   private:\
+    GTEST_DISALLOW_ASSIGN_(name##ActionP7);\
+  };\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type, typename p4##_type, typename p5##_type, \
+      typename p6##_type>\
+  inline name##ActionP7<p0##_type, p1##_type, p2##_type, p3##_type, \
+      p4##_type, p5##_type, p6##_type> name(p0##_type p0, p1##_type p1, \
+      p2##_type p2, p3##_type p3, p4##_type p4, p5##_type p5, \
+      p6##_type p6) {\
+    return name##ActionP7<p0##_type, p1##_type, p2##_type, p3##_type, \
+        p4##_type, p5##_type, p6##_type>(p0, p1, p2, p3, p4, p5, p6);\
+  }\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type, typename p4##_type, typename p5##_type, \
+      typename p6##_type>\
+  template <typename F>\
+  template <typename arg0_type, typename arg1_type, typename arg2_type, \
+      typename arg3_type, typename arg4_type, typename arg5_type, \
+      typename arg6_type, typename arg7_type, typename arg8_type, \
+      typename arg9_type>\
+  typename ::testing::internal::Function<F>::Result\
+      name##ActionP7<p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, \
+          p5##_type, p6##_type>::gmock_Impl<F>::gmock_PerformImpl(\
+          GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const
+
+#define ACTION_P8(name, p0, p1, p2, p3, p4, p5, p6, p7)\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type, typename p4##_type, typename p5##_type, \
+      typename p6##_type, typename p7##_type>\
+  class name##ActionP8 {\
+   public:\
+    name##ActionP8(p0##_type gmock_p0, p1##_type gmock_p1, \
+        p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \
+        p5##_type gmock_p5, p6##_type gmock_p6, \
+        p7##_type gmock_p7) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \
+        p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), \
+        p7(gmock_p7) {}\
+    template <typename F>\
+    class gmock_Impl : public ::testing::ActionInterface<F> {\
+     public:\
+      typedef F function_type;\
+      typedef typename ::testing::internal::Function<F>::Result return_type;\
+      typedef typename ::testing::internal::Function<F>::ArgumentTuple\
+          args_type;\
+      gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+          p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \
+          p6##_type gmock_p6, p7##_type gmock_p7) : p0(gmock_p0), \
+          p1(gmock_p1), p2(gmock_p2), p3(gmock_p3), p4(gmock_p4), \
+          p5(gmock_p5), p6(gmock_p6), p7(gmock_p7) {}\
+      virtual return_type Perform(const args_type& args) {\
+        return ::testing::internal::ActionHelper<return_type, gmock_Impl>::\
+            Perform(this, args);\
+      }\
+      template <typename arg0_type, typename arg1_type, typename arg2_type, \
+          typename arg3_type, typename arg4_type, typename arg5_type, \
+          typename arg6_type, typename arg7_type, typename arg8_type, \
+          typename arg9_type>\
+      return_type gmock_PerformImpl(const args_type& args, arg0_type arg0, \
+          arg1_type arg1, arg2_type arg2, arg3_type arg3, arg4_type arg4, \
+          arg5_type arg5, arg6_type arg6, arg7_type arg7, arg8_type arg8, \
+          arg9_type arg9) const;\
+      p0##_type p0;\
+      p1##_type p1;\
+      p2##_type p2;\
+      p3##_type p3;\
+      p4##_type p4;\
+      p5##_type p5;\
+      p6##_type p6;\
+      p7##_type p7;\
+     private:\
+      GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+    };\
+    template <typename F> operator ::testing::Action<F>() const {\
+      return ::testing::Action<F>(new gmock_Impl<F>(p0, p1, p2, p3, p4, p5, \
+          p6, p7));\
+    }\
+    p0##_type p0;\
+    p1##_type p1;\
+    p2##_type p2;\
+    p3##_type p3;\
+    p4##_type p4;\
+    p5##_type p5;\
+    p6##_type p6;\
+    p7##_type p7;\
+   private:\
+    GTEST_DISALLOW_ASSIGN_(name##ActionP8);\
+  };\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type, typename p4##_type, typename p5##_type, \
+      typename p6##_type, typename p7##_type>\
+  inline name##ActionP8<p0##_type, p1##_type, p2##_type, p3##_type, \
+      p4##_type, p5##_type, p6##_type, p7##_type> name(p0##_type p0, \
+      p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4, p5##_type p5, \
+      p6##_type p6, p7##_type p7) {\
+    return name##ActionP8<p0##_type, p1##_type, p2##_type, p3##_type, \
+        p4##_type, p5##_type, p6##_type, p7##_type>(p0, p1, p2, p3, p4, p5, \
+        p6, p7);\
+  }\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type, typename p4##_type, typename p5##_type, \
+      typename p6##_type, typename p7##_type>\
+  template <typename F>\
+  template <typename arg0_type, typename arg1_type, typename arg2_type, \
+      typename arg3_type, typename arg4_type, typename arg5_type, \
+      typename arg6_type, typename arg7_type, typename arg8_type, \
+      typename arg9_type>\
+  typename ::testing::internal::Function<F>::Result\
+      name##ActionP8<p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, \
+          p5##_type, p6##_type, \
+          p7##_type>::gmock_Impl<F>::gmock_PerformImpl(\
+          GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const
+
+#define ACTION_P9(name, p0, p1, p2, p3, p4, p5, p6, p7, p8)\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type, typename p4##_type, typename p5##_type, \
+      typename p6##_type, typename p7##_type, typename p8##_type>\
+  class name##ActionP9 {\
+   public:\
+    name##ActionP9(p0##_type gmock_p0, p1##_type gmock_p1, \
+        p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \
+        p5##_type gmock_p5, p6##_type gmock_p6, p7##_type gmock_p7, \
+        p8##_type gmock_p8) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \
+        p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), p7(gmock_p7), \
+        p8(gmock_p8) {}\
+    template <typename F>\
+    class gmock_Impl : public ::testing::ActionInterface<F> {\
+     public:\
+      typedef F function_type;\
+      typedef typename ::testing::internal::Function<F>::Result return_type;\
+      typedef typename ::testing::internal::Function<F>::ArgumentTuple\
+          args_type;\
+      gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+          p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \
+          p6##_type gmock_p6, p7##_type gmock_p7, \
+          p8##_type gmock_p8) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \
+          p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), \
+          p7(gmock_p7), p8(gmock_p8) {}\
+      virtual return_type Perform(const args_type& args) {\
+        return ::testing::internal::ActionHelper<return_type, gmock_Impl>::\
+            Perform(this, args);\
+      }\
+      template <typename arg0_type, typename arg1_type, typename arg2_type, \
+          typename arg3_type, typename arg4_type, typename arg5_type, \
+          typename arg6_type, typename arg7_type, typename arg8_type, \
+          typename arg9_type>\
+      return_type gmock_PerformImpl(const args_type& args, arg0_type arg0, \
+          arg1_type arg1, arg2_type arg2, arg3_type arg3, arg4_type arg4, \
+          arg5_type arg5, arg6_type arg6, arg7_type arg7, arg8_type arg8, \
+          arg9_type arg9) const;\
+      p0##_type p0;\
+      p1##_type p1;\
+      p2##_type p2;\
+      p3##_type p3;\
+      p4##_type p4;\
+      p5##_type p5;\
+      p6##_type p6;\
+      p7##_type p7;\
+      p8##_type p8;\
+     private:\
+      GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+    };\
+    template <typename F> operator ::testing::Action<F>() const {\
+      return ::testing::Action<F>(new gmock_Impl<F>(p0, p1, p2, p3, p4, p5, \
+          p6, p7, p8));\
+    }\
+    p0##_type p0;\
+    p1##_type p1;\
+    p2##_type p2;\
+    p3##_type p3;\
+    p4##_type p4;\
+    p5##_type p5;\
+    p6##_type p6;\
+    p7##_type p7;\
+    p8##_type p8;\
+   private:\
+    GTEST_DISALLOW_ASSIGN_(name##ActionP9);\
+  };\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type, typename p4##_type, typename p5##_type, \
+      typename p6##_type, typename p7##_type, typename p8##_type>\
+  inline name##ActionP9<p0##_type, p1##_type, p2##_type, p3##_type, \
+      p4##_type, p5##_type, p6##_type, p7##_type, \
+      p8##_type> name(p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, \
+      p4##_type p4, p5##_type p5, p6##_type p6, p7##_type p7, \
+      p8##_type p8) {\
+    return name##ActionP9<p0##_type, p1##_type, p2##_type, p3##_type, \
+        p4##_type, p5##_type, p6##_type, p7##_type, p8##_type>(p0, p1, p2, \
+        p3, p4, p5, p6, p7, p8);\
+  }\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type, typename p4##_type, typename p5##_type, \
+      typename p6##_type, typename p7##_type, typename p8##_type>\
+  template <typename F>\
+  template <typename arg0_type, typename arg1_type, typename arg2_type, \
+      typename arg3_type, typename arg4_type, typename arg5_type, \
+      typename arg6_type, typename arg7_type, typename arg8_type, \
+      typename arg9_type>\
+  typename ::testing::internal::Function<F>::Result\
+      name##ActionP9<p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, \
+          p5##_type, p6##_type, p7##_type, \
+          p8##_type>::gmock_Impl<F>::gmock_PerformImpl(\
+          GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const
+
+#define ACTION_P10(name, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9)\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type, typename p4##_type, typename p5##_type, \
+      typename p6##_type, typename p7##_type, typename p8##_type, \
+      typename p9##_type>\
+  class name##ActionP10 {\
+   public:\
+    name##ActionP10(p0##_type gmock_p0, p1##_type gmock_p1, \
+        p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \
+        p5##_type gmock_p5, p6##_type gmock_p6, p7##_type gmock_p7, \
+        p8##_type gmock_p8, p9##_type gmock_p9) : p0(gmock_p0), p1(gmock_p1), \
+        p2(gmock_p2), p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), \
+        p7(gmock_p7), p8(gmock_p8), p9(gmock_p9) {}\
+    template <typename F>\
+    class gmock_Impl : public ::testing::ActionInterface<F> {\
+     public:\
+      typedef F function_type;\
+      typedef typename ::testing::internal::Function<F>::Result return_type;\
+      typedef typename ::testing::internal::Function<F>::ArgumentTuple\
+          args_type;\
+      gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+          p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \
+          p6##_type gmock_p6, p7##_type gmock_p7, p8##_type gmock_p8, \
+          p9##_type gmock_p9) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \
+          p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), \
+          p7(gmock_p7), p8(gmock_p8), p9(gmock_p9) {}\
+      virtual return_type Perform(const args_type& args) {\
+        return ::testing::internal::ActionHelper<return_type, gmock_Impl>::\
+            Perform(this, args);\
+      }\
+      template <typename arg0_type, typename arg1_type, typename arg2_type, \
+          typename arg3_type, typename arg4_type, typename arg5_type, \
+          typename arg6_type, typename arg7_type, typename arg8_type, \
+          typename arg9_type>\
+      return_type gmock_PerformImpl(const args_type& args, arg0_type arg0, \
+          arg1_type arg1, arg2_type arg2, arg3_type arg3, arg4_type arg4, \
+          arg5_type arg5, arg6_type arg6, arg7_type arg7, arg8_type arg8, \
+          arg9_type arg9) const;\
+      p0##_type p0;\
+      p1##_type p1;\
+      p2##_type p2;\
+      p3##_type p3;\
+      p4##_type p4;\
+      p5##_type p5;\
+      p6##_type p6;\
+      p7##_type p7;\
+      p8##_type p8;\
+      p9##_type p9;\
+     private:\
+      GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+    };\
+    template <typename F> operator ::testing::Action<F>() const {\
+      return ::testing::Action<F>(new gmock_Impl<F>(p0, p1, p2, p3, p4, p5, \
+          p6, p7, p8, p9));\
+    }\
+    p0##_type p0;\
+    p1##_type p1;\
+    p2##_type p2;\
+    p3##_type p3;\
+    p4##_type p4;\
+    p5##_type p5;\
+    p6##_type p6;\
+    p7##_type p7;\
+    p8##_type p8;\
+    p9##_type p9;\
+   private:\
+    GTEST_DISALLOW_ASSIGN_(name##ActionP10);\
+  };\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type, typename p4##_type, typename p5##_type, \
+      typename p6##_type, typename p7##_type, typename p8##_type, \
+      typename p9##_type>\
+  inline name##ActionP10<p0##_type, p1##_type, p2##_type, p3##_type, \
+      p4##_type, p5##_type, p6##_type, p7##_type, p8##_type, \
+      p9##_type> name(p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, \
+      p4##_type p4, p5##_type p5, p6##_type p6, p7##_type p7, p8##_type p8, \
+      p9##_type p9) {\
+    return name##ActionP10<p0##_type, p1##_type, p2##_type, p3##_type, \
+        p4##_type, p5##_type, p6##_type, p7##_type, p8##_type, p9##_type>(p0, \
+        p1, p2, p3, p4, p5, p6, p7, p8, p9);\
+  }\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type, typename p4##_type, typename p5##_type, \
+      typename p6##_type, typename p7##_type, typename p8##_type, \
+      typename p9##_type>\
+  template <typename F>\
+  template <typename arg0_type, typename arg1_type, typename arg2_type, \
+      typename arg3_type, typename arg4_type, typename arg5_type, \
+      typename arg6_type, typename arg7_type, typename arg8_type, \
+      typename arg9_type>\
+  typename ::testing::internal::Function<F>::Result\
+      name##ActionP10<p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, \
+          p5##_type, p6##_type, p7##_type, p8##_type, \
+          p9##_type>::gmock_Impl<F>::gmock_PerformImpl(\
+          GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const
+
+namespace testing {
+
+
+// The ACTION*() macros trigger warning C4100 (unreferenced formal
+// parameter) in MSVC with -W4.  Unfortunately they cannot be fixed in
+// the macro definition, as the warnings are generated when the macro
+// is expanded and macro expansion cannot contain #pragma.  Therefore
+// we suppress them here.
+#ifdef _MSC_VER
+# pragma warning(push)
+# pragma warning(disable:4100)
+#endif
+
+// Various overloads for InvokeArgument<N>().
+//
+// The InvokeArgument<N>(a1, a2, ..., a_k) action invokes the mock
+// function's N-th (0-based) argument, which must be a k-ary callable,
+// with arguments a1, a2, ..., a_k.
+//
+// Notes:
+//
+//   1. The arguments are passed by value by default.  If you need to
+//   pass an argument by reference, wrap it inside ByRef().  For
+//   example,
+//
+//     InvokeArgument<1>(5, string("Hello"), ByRef(foo))
+//
+//   passes 5 and string("Hello") by value, and passes foo by
+//   reference.
+//
+//   2. If the callable takes an argument by reference but ByRef() is
+//   not used, it will receive a reference to a copy of the value
+//   rather than to the original value.  For example, when the 0-th
+//   argument of the mock function takes a const string&, the action
+//
+//     InvokeArgument<0>(string("Hello"))
+//
+//   makes a copy of the temporary string("Hello") object and passes a
+//   reference to that copy, rather than to the original temporary
+//   object, to the callable.  This makes it easy for a user to define an
+//   InvokeArgument action from temporary values and have it performed
+//   later.
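+//
+// A brief usage sketch (the mock method here is hypothetical): if the
+// 0-th argument of DoThis() is a unary callable, then
+//
+//   EXPECT_CALL(mock, DoThis(_))
+//       .WillOnce(InvokeArgument<0>(5));
+//
+// invokes that callable with the argument 5 when the expectation fires.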
+
+namespace internal {
+namespace invoke_argument {
+
+// Appears in InvokeArgumentAdl's argument list to help avoid
+// accidental calls to user functions of the same name.
+struct AdlTag {};
+
+// InvokeArgumentAdl - a helper for InvokeArgument.
+// The basic overloads are provided here for generic functors.
+// Overloads for other custom-callables are provided in the
+// internal/custom/callback-actions.h header.
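+//
+// Call sketch (illustrative): the InvokeArgument actions below perform
+//
+//   InvokeArgumentAdl<return_type>(AdlTag(), f, p0, ..., p_k)
+//
+// where f is the mock-function argument selected by <N>; passing AdlTag
+// keeps overload resolution anchored to the overloads declared in this
+// namespace (and in the custom header) instead of same-named user
+// functions.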
+
+template <typename R, typename F>
+R InvokeArgumentAdl(AdlTag, F f) {
+  return f();
+}
+template <typename R, typename F, typename A1>
+R InvokeArgumentAdl(AdlTag, F f, A1 a1) {
+  return f(a1);
+}
+template <typename R, typename F, typename A1, typename A2>
+R InvokeArgumentAdl(AdlTag, F f, A1 a1, A2 a2) {
+  return f(a1, a2);
+}
+template <typename R, typename F, typename A1, typename A2, typename A3>
+R InvokeArgumentAdl(AdlTag, F f, A1 a1, A2 a2, A3 a3) {
+  return f(a1, a2, a3);
+}
+template <typename R, typename F, typename A1, typename A2, typename A3,
+    typename A4>
+R InvokeArgumentAdl(AdlTag, F f, A1 a1, A2 a2, A3 a3, A4 a4) {
+  return f(a1, a2, a3, a4);
+}
+template <typename R, typename F, typename A1, typename A2, typename A3,
+    typename A4, typename A5>
+R InvokeArgumentAdl(AdlTag, F f, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5) {
+  return f(a1, a2, a3, a4, a5);
+}
+template <typename R, typename F, typename A1, typename A2, typename A3,
+    typename A4, typename A5, typename A6>
+R InvokeArgumentAdl(AdlTag, F f, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6) {
+  return f(a1, a2, a3, a4, a5, a6);
+}
+template <typename R, typename F, typename A1, typename A2, typename A3,
+    typename A4, typename A5, typename A6, typename A7>
+R InvokeArgumentAdl(AdlTag, F f, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6,
+    A7 a7) {
+  return f(a1, a2, a3, a4, a5, a6, a7);
+}
+template <typename R, typename F, typename A1, typename A2, typename A3,
+    typename A4, typename A5, typename A6, typename A7, typename A8>
+R InvokeArgumentAdl(AdlTag, F f, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6,
+    A7 a7, A8 a8) {
+  return f(a1, a2, a3, a4, a5, a6, a7, a8);
+}
+template <typename R, typename F, typename A1, typename A2, typename A3,
+    typename A4, typename A5, typename A6, typename A7, typename A8,
+    typename A9>
+R InvokeArgumentAdl(AdlTag, F f, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6,
+    A7 a7, A8 a8, A9 a9) {
+  return f(a1, a2, a3, a4, a5, a6, a7, a8, a9);
+}
+template <typename R, typename F, typename A1, typename A2, typename A3,
+    typename A4, typename A5, typename A6, typename A7, typename A8,
+    typename A9, typename A10>
+R InvokeArgumentAdl(AdlTag, F f, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6,
+    A7 a7, A8 a8, A9 a9, A10 a10) {
+  return f(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10);
+}
+}  // namespace invoke_argument
+}  // namespace internal
+
+ACTION_TEMPLATE(InvokeArgument,
+                HAS_1_TEMPLATE_PARAMS(int, k),
+                AND_0_VALUE_PARAMS()) {
+  using internal::invoke_argument::InvokeArgumentAdl;
+  return InvokeArgumentAdl<return_type>(
+      internal::invoke_argument::AdlTag(),
+      ::testing::get<k>(args));
+}
+
+ACTION_TEMPLATE(InvokeArgument,
+                HAS_1_TEMPLATE_PARAMS(int, k),
+                AND_1_VALUE_PARAMS(p0)) {
+  using internal::invoke_argument::InvokeArgumentAdl;
+  return InvokeArgumentAdl<return_type>(
+      internal::invoke_argument::AdlTag(),
+      ::testing::get<k>(args), p0);
+}
+
+ACTION_TEMPLATE(InvokeArgument,
+                HAS_1_TEMPLATE_PARAMS(int, k),
+                AND_2_VALUE_PARAMS(p0, p1)) {
+  using internal::invoke_argument::InvokeArgumentAdl;
+  return InvokeArgumentAdl<return_type>(
+      internal::invoke_argument::AdlTag(),
+      ::testing::get<k>(args), p0, p1);
+}
+
+ACTION_TEMPLATE(InvokeArgument,
+                HAS_1_TEMPLATE_PARAMS(int, k),
+                AND_3_VALUE_PARAMS(p0, p1, p2)) {
+  using internal::invoke_argument::InvokeArgumentAdl;
+  return InvokeArgumentAdl<return_type>(
+      internal::invoke_argument::AdlTag(),
+      ::testing::get<k>(args), p0, p1, p2);
+}
+
+ACTION_TEMPLATE(InvokeArgument,
+                HAS_1_TEMPLATE_PARAMS(int, k),
+                AND_4_VALUE_PARAMS(p0, p1, p2, p3)) {
+  using internal::invoke_argument::InvokeArgumentAdl;
+  return InvokeArgumentAdl<return_type>(
+      internal::invoke_argument::AdlTag(),
+      ::testing::get<k>(args), p0, p1, p2, p3);
+}
+
+ACTION_TEMPLATE(InvokeArgument,
+                HAS_1_TEMPLATE_PARAMS(int, k),
+                AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4)) {
+  using internal::invoke_argument::InvokeArgumentAdl;
+  return InvokeArgumentAdl<return_type>(
+      internal::invoke_argument::AdlTag(),
+      ::testing::get<k>(args), p0, p1, p2, p3, p4);
+}
+
+ACTION_TEMPLATE(InvokeArgument,
+                HAS_1_TEMPLATE_PARAMS(int, k),
+                AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5)) {
+  using internal::invoke_argument::InvokeArgumentAdl;
+  return InvokeArgumentAdl<return_type>(
+      internal::invoke_argument::AdlTag(),
+      ::testing::get<k>(args), p0, p1, p2, p3, p4, p5);
+}
+
+ACTION_TEMPLATE(InvokeArgument,
+                HAS_1_TEMPLATE_PARAMS(int, k),
+                AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6)) {
+  using internal::invoke_argument::InvokeArgumentAdl;
+  return InvokeArgumentAdl<return_type>(
+      internal::invoke_argument::AdlTag(),
+      ::testing::get<k>(args), p0, p1, p2, p3, p4, p5, p6);
+}
+
+ACTION_TEMPLATE(InvokeArgument,
+                HAS_1_TEMPLATE_PARAMS(int, k),
+                AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7)) {
+  using internal::invoke_argument::InvokeArgumentAdl;
+  return InvokeArgumentAdl<return_type>(
+      internal::invoke_argument::AdlTag(),
+      ::testing::get<k>(args), p0, p1, p2, p3, p4, p5, p6, p7);
+}
+
+ACTION_TEMPLATE(InvokeArgument,
+                HAS_1_TEMPLATE_PARAMS(int, k),
+                AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7, p8)) {
+  using internal::invoke_argument::InvokeArgumentAdl;
+  return InvokeArgumentAdl<return_type>(
+      internal::invoke_argument::AdlTag(),
+      ::testing::get<k>(args), p0, p1, p2, p3, p4, p5, p6, p7, p8);
+}
+
+ACTION_TEMPLATE(InvokeArgument,
+                HAS_1_TEMPLATE_PARAMS(int, k),
+                AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9)) {
+  using internal::invoke_argument::InvokeArgumentAdl;
+  return InvokeArgumentAdl<return_type>(
+      internal::invoke_argument::AdlTag(),
+      ::testing::get<k>(args), p0, p1, p2, p3, p4, p5, p6, p7, p8, p9);
+}
+
+// Various overloads for ReturnNew<T>().
+//
+// The ReturnNew<T>(a1, a2, ..., a_k) action returns a pointer to a new
+// instance of type T, constructed on the heap with constructor arguments
+// a1, a2, ..., and a_k. The caller assumes ownership of the returned value.
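+//
+// Usage sketch (Widget is a hypothetical type with a Widget(int)
+// constructor; the mocked method must return a Widget*):
+//
+//   EXPECT_CALL(factory, MakeWidget())
+//       .WillOnce(ReturnNew<Widget>(42));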
+ACTION_TEMPLATE(ReturnNew,
+                HAS_1_TEMPLATE_PARAMS(typename, T),
+                AND_0_VALUE_PARAMS()) {
+  return new T();
+}
+
+ACTION_TEMPLATE(ReturnNew,
+                HAS_1_TEMPLATE_PARAMS(typename, T),
+                AND_1_VALUE_PARAMS(p0)) {
+  return new T(p0);
+}
+
+ACTION_TEMPLATE(ReturnNew,
+                HAS_1_TEMPLATE_PARAMS(typename, T),
+                AND_2_VALUE_PARAMS(p0, p1)) {
+  return new T(p0, p1);
+}
+
+ACTION_TEMPLATE(ReturnNew,
+                HAS_1_TEMPLATE_PARAMS(typename, T),
+                AND_3_VALUE_PARAMS(p0, p1, p2)) {
+  return new T(p0, p1, p2);
+}
+
+ACTION_TEMPLATE(ReturnNew,
+                HAS_1_TEMPLATE_PARAMS(typename, T),
+                AND_4_VALUE_PARAMS(p0, p1, p2, p3)) {
+  return new T(p0, p1, p2, p3);
+}
+
+ACTION_TEMPLATE(ReturnNew,
+                HAS_1_TEMPLATE_PARAMS(typename, T),
+                AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4)) {
+  return new T(p0, p1, p2, p3, p4);
+}
+
+ACTION_TEMPLATE(ReturnNew,
+                HAS_1_TEMPLATE_PARAMS(typename, T),
+                AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5)) {
+  return new T(p0, p1, p2, p3, p4, p5);
+}
+
+ACTION_TEMPLATE(ReturnNew,
+                HAS_1_TEMPLATE_PARAMS(typename, T),
+                AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6)) {
+  return new T(p0, p1, p2, p3, p4, p5, p6);
+}
+
+ACTION_TEMPLATE(ReturnNew,
+                HAS_1_TEMPLATE_PARAMS(typename, T),
+                AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7)) {
+  return new T(p0, p1, p2, p3, p4, p5, p6, p7);
+}
+
+ACTION_TEMPLATE(ReturnNew,
+                HAS_1_TEMPLATE_PARAMS(typename, T),
+                AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7, p8)) {
+  return new T(p0, p1, p2, p3, p4, p5, p6, p7, p8);
+}
+
+ACTION_TEMPLATE(ReturnNew,
+                HAS_1_TEMPLATE_PARAMS(typename, T),
+                AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9)) {
+  return new T(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9);
+}
+
+#ifdef _MSC_VER
+# pragma warning(pop)
+#endif
+
+}  // namespace testing
+
+// Include any custom actions added by the local installation.
+// We must include this header at the end to make sure it can use the
+// declarations from this file.
+// This file was GENERATED by command:
+//     pump.py gmock-generated-actions.h.pump
+// DO NOT EDIT BY HAND!!!
+
+#ifndef GMOCK_INCLUDE_GMOCK_INTERNAL_CUSTOM_GMOCK_GENERATED_ACTIONS_H_
+#define GMOCK_INCLUDE_GMOCK_INTERNAL_CUSTOM_GMOCK_GENERATED_ACTIONS_H_
+
+#endif  // GMOCK_INCLUDE_GMOCK_INTERNAL_CUSTOM_GMOCK_GENERATED_ACTIONS_H_
+
+#endif  // GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_ACTIONS_H_
+// This file was GENERATED by command:
+//     pump.py gmock-generated-function-mockers.h.pump
+// DO NOT EDIT BY HAND!!!
+
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Mock - a framework for writing C++ mock classes.
+//
+// This file implements function mockers of various arities.
+
+#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_FUNCTION_MOCKERS_H_
+#define GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_FUNCTION_MOCKERS_H_
+
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Mock - a framework for writing C++ mock classes.
+//
+// This file implements the ON_CALL() and EXPECT_CALL() macros.
+//
+// A user can use the ON_CALL() macro to specify the default action of
+// a mock method.  The syntax is:
+//
+//   ON_CALL(mock_object, Method(argument-matchers))
+//       .With(multi-argument-matcher)
+//       .WillByDefault(action);
+//
+//  where the .With() clause is optional.
+//
+// A user can use the EXPECT_CALL() macro to specify an expectation on
+// a mock method.  The syntax is:
+//
+//   EXPECT_CALL(mock_object, Method(argument-matchers))
+//       .With(multi-argument-matchers)
+//       .Times(cardinality)
+//       .InSequence(sequences)
+//       .After(expectations)
+//       .WillOnce(action)
+//       .WillRepeatedly(action)
+//       .RetiresOnSaturation();
+//
+// where all clauses are optional, and .InSequence()/.After()/
+// .WillOnce() can appear any number of times.
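+//
+// As a purely illustrative sketch (MockTurtle and its GetX() method are
+// hypothetical names borrowed from the Google Mock documentation, not
+// part of this header), the two macros might be used together like this:
+//
+//   MockTurtle turtle;
+//   ON_CALL(turtle, GetX())
+//       .WillByDefault(Return(0));
+//   EXPECT_CALL(turtle, GetX())
+//       .Times(2)
+//       .WillRepeatedly(Return(5));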
+
+#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_SPEC_BUILDERS_H_
+#define GMOCK_INCLUDE_GMOCK_GMOCK_SPEC_BUILDERS_H_
+
+#include <map>
+#include <set>
+#include <sstream>
+#include <string>
+#include <vector>
+
+#if GTEST_HAS_EXCEPTIONS
+# include <stdexcept>  // NOLINT
+#endif
+
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Mock - a framework for writing C++ mock classes.
+//
+// This file implements some commonly used argument matchers.  More
+// matchers can be defined by the user implementing the
+// MatcherInterface<T> interface if necessary.
+
+#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_MATCHERS_H_
+#define GMOCK_INCLUDE_GMOCK_GMOCK_MATCHERS_H_
+
+#include <math.h>
+#include <algorithm>
+#include <iterator>
+#include <limits>
+#include <ostream>  // NOLINT
+#include <sstream>
+#include <string>
+#include <utility>
+#include <vector>
+
+
+#if GTEST_HAS_STD_INITIALIZER_LIST_
+# include <initializer_list>  // NOLINT -- must be after gtest.h
+#endif
+
+namespace testing {
+
+// To implement a matcher Foo for type T, define:
+//   1. a class FooMatcherImpl that implements the
+//      MatcherInterface<T> interface, and
+//   2. a factory function that creates a Matcher<T> object from a
+//      FooMatcherImpl*.
+//
+// The two-level delegation design makes it possible to allow a user
+// to write "v" instead of "Eq(v)" where a Matcher is expected, which
+// is impossible if we pass matchers by pointers.  It also eases
+// ownership management as Matcher objects can now be copied like
+// plain values.
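+//
+// A minimal illustrative sketch of that pattern (IsPositive and
+// IsPositiveMatcherImpl are hypothetical names, not part of this header):
+//
+//   class IsPositiveMatcherImpl : public MatcherInterface<int> {
+//    public:
+//     virtual bool MatchAndExplain(int n,
+//                                  MatchResultListener* /* listener */) const {
+//       return n > 0;
+//     }
+//     virtual void DescribeTo(::std::ostream* os) const {
+//       *os << "is positive";
+//     }
+//   };
+//
+//   inline Matcher<int> IsPositive() {
+//     return MakeMatcher(new IsPositiveMatcherImpl);
+//   }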
+
+// MatchResultListener is an abstract class.  Its << operator can be
+// used by a matcher to explain why a value matches or doesn't match.
+//
+// TODO(wan@google.com): add method
+//   bool InterestedInWhy(bool result) const;
+// to indicate whether the listener is interested in why the match
+// result is 'result'.
+class MatchResultListener {
+ public:
+  // Creates a listener object with the given underlying ostream.  The
+  // listener does not own the ostream, and does not dereference it
+  // in the constructor or destructor.
+  explicit MatchResultListener(::std::ostream* os) : stream_(os) {}
+  virtual ~MatchResultListener() = 0;  // Makes this class abstract.
+
+  // Streams x to the underlying ostream; does nothing if the ostream
+  // is NULL.
+  template <typename T>
+  MatchResultListener& operator<<(const T& x) {
+    if (stream_ != NULL)
+      *stream_ << x;
+    return *this;
+  }
+
+  // Returns the underlying ostream.
+  ::std::ostream* stream() { return stream_; }
+
+  // Returns true iff the listener is interested in an explanation of
+  // the match result.  A matcher's MatchAndExplain() method can use
+  // this information to avoid generating the explanation when no one
+  // intends to hear it.
+  bool IsInterested() const { return stream_ != NULL; }
+
+ private:
+  ::std::ostream* const stream_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(MatchResultListener);
+};
+
+inline MatchResultListener::~MatchResultListener() {
+}
+
+// An instance of a subclass of this knows how to describe itself as a
+// matcher.
+class MatcherDescriberInterface {
+ public:
+  virtual ~MatcherDescriberInterface() {}
+
+  // Describes this matcher to an ostream.  The function should print
+  // a verb phrase that describes the property a value matching this
+  // matcher should have.  The subject of the verb phrase is the value
+  // being matched.  For example, the DescribeTo() method of the Gt(7)
+  // matcher prints "is greater than 7".
+  virtual void DescribeTo(::std::ostream* os) const = 0;
+
+  // Describes the negation of this matcher to an ostream.  For
+  // example, if the description of this matcher is "is greater than
+  // 7", the negated description could be "is not greater than 7".
+  // You are not required to override this when implementing
+  // MatcherInterface, but it is highly advised so that your matcher
+  // can produce good error messages.
+  virtual void DescribeNegationTo(::std::ostream* os) const {
+    *os << "not (";
+    DescribeTo(os);
+    *os << ")";
+  }
+};
+
+// The implementation of a matcher.
+template <typename T>
+class MatcherInterface : public MatcherDescriberInterface {
+ public:
+  // Returns true iff the matcher matches x; also explains the match
+  // result to 'listener' if necessary (see the next paragraph), in
+  // the form of a non-restrictive relative clause ("which ...",
+  // "whose ...", etc) that describes x.  For example, the
+  // MatchAndExplain() method of the Pointee(...) matcher should
+  // generate an explanation like "which points to ...".
+  //
+  // Implementations of MatchAndExplain() should add an explanation of
+  // the match result *if and only if* they can provide additional
+  // information that's not already present (or not obvious) in the
+  // print-out of x and the matcher's description.  Whether the match
+  // succeeds is not a factor in deciding whether an explanation is
+  // needed, as sometimes the caller needs to print a failure message
+  // when the match succeeds (e.g. when the matcher is used inside
+  // Not()).
+  //
+  // For example, a "has at least 10 elements" matcher should explain
+  // what the actual element count is, regardless of the match result,
+  // as it is useful information to the reader; on the other hand, an
+  // "is empty" matcher probably only needs to explain what the actual
+  // size is when the match fails, as it's redundant to say that the
+  // size is 0 when the value is already known to be empty.
+  //
+  // You should override this method when defining a new matcher.
+  //
+  // It's the responsibility of the caller (Google Mock) to guarantee
+  // that 'listener' is not NULL.  This helps to simplify a matcher's
+  // implementation when it doesn't care about the performance, as it
+  // can talk to 'listener' without checking its validity first.
+  // However, in order to implement dummy listeners efficiently,
+  // listener->stream() may be NULL.
+  virtual bool MatchAndExplain(T x, MatchResultListener* listener) const = 0;
+
+  // Inherits these methods from MatcherDescriberInterface:
+  //   virtual void DescribeTo(::std::ostream* os) const = 0;
+  //   virtual void DescribeNegationTo(::std::ostream* os) const;
+};
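+
+// For illustration only (SizeIsAtLeastTenMatcher is a hypothetical name,
+// not part of this header), the "has at least 10 elements" matcher
+// mentioned above could stream the actual element count to the listener
+// regardless of the outcome:
+//
+//   class SizeIsAtLeastTenMatcher
+//       : public MatcherInterface<const ::std::vector<int>&> {
+//    public:
+//     virtual bool MatchAndExplain(const ::std::vector<int>& c,
+//                                  MatchResultListener* listener) const {
+//       *listener << "whose size is " << c.size();
+//       return c.size() >= 10;
+//     }
+//     virtual void DescribeTo(::std::ostream* os) const {
+//       *os << "has at least 10 elements";
+//     }
+//   };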
+
+// A match result listener that stores the explanation in a string.
+class StringMatchResultListener : public MatchResultListener {
+ public:
+  StringMatchResultListener() : MatchResultListener(&ss_) {}
+
+  // Returns the explanation accumulated so far.
+  internal::string str() const { return ss_.str(); }
+
+  // Clears the explanation accumulated so far.
+  void Clear() { ss_.str(""); }
+
+ private:
+  ::std::stringstream ss_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(StringMatchResultListener);
+};
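+
+// For example (illustrative only; 'matcher' and 'value' stand for an
+// arbitrary Matcher<T> and a value of a compatible type), a caller can
+// capture a matcher's explanation like this:
+//
+//   StringMatchResultListener listener;
+//   const bool matched = matcher.MatchAndExplain(value, &listener);
+//   const internal::string explanation = listener.str();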
+
+namespace internal {
+
+struct AnyEq {
+  template <typename A, typename B>
+  bool operator()(const A& a, const B& b) const { return a == b; }
+};
+struct AnyNe {
+  template <typename A, typename B>
+  bool operator()(const A& a, const B& b) const { return a != b; }
+};
+struct AnyLt {
+  template <typename A, typename B>
+  bool operator()(const A& a, const B& b) const { return a < b; }
+};
+struct AnyGt {
+  template <typename A, typename B>
+  bool operator()(const A& a, const B& b) const { return a > b; }
+};
+struct AnyLe {
+  template <typename A, typename B>
+  bool operator()(const A& a, const B& b) const { return a <= b; }
+};
+struct AnyGe {
+  template <typename A, typename B>
+  bool operator()(const A& a, const B& b) const { return a >= b; }
+};
+
+// A match result listener that ignores the explanation.
+class DummyMatchResultListener : public MatchResultListener {
+ public:
+  DummyMatchResultListener() : MatchResultListener(NULL) {}
+
+ private:
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(DummyMatchResultListener);
+};
+
+// A match result listener that forwards the explanation to a given
+// ostream.  The difference between this and MatchResultListener is
+// that the former is concrete.
+class StreamMatchResultListener : public MatchResultListener {
+ public:
+  explicit StreamMatchResultListener(::std::ostream* os)
+      : MatchResultListener(os) {}
+
+ private:
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(StreamMatchResultListener);
+};
+
+// An internal class for implementing Matcher<T>, which will derive
+// from it.  We put functionalities common to all Matcher<T>
+// specializations here to avoid code duplication.
+template <typename T>
+class MatcherBase {
+ public:
+  // Returns true iff the matcher matches x; also explains the match
+  // result to 'listener'.
+  bool MatchAndExplain(T x, MatchResultListener* listener) const {
+    return impl_->MatchAndExplain(x, listener);
+  }
+
+  // Returns true iff this matcher matches x.
+  bool Matches(T x) const {
+    DummyMatchResultListener dummy;
+    return MatchAndExplain(x, &dummy);
+  }
+
+  // Describes this matcher to an ostream.
+  void DescribeTo(::std::ostream* os) const { impl_->DescribeTo(os); }
+
+  // Describes the negation of this matcher to an ostream.
+  void DescribeNegationTo(::std::ostream* os) const {
+    impl_->DescribeNegationTo(os);
+  }
+
+  // Explains why x matches, or doesn't match, the matcher.
+  void ExplainMatchResultTo(T x, ::std::ostream* os) const {
+    StreamMatchResultListener listener(os);
+    MatchAndExplain(x, &listener);
+  }
+
+  // Returns the describer for this matcher object; retains ownership
+  // of the describer, which is only guaranteed to be alive when
+  // this matcher object is alive.
+  const MatcherDescriberInterface* GetDescriber() const {
+    return impl_.get();
+  }
+
+ protected:
+  MatcherBase() {}
+
+  // Constructs a matcher from its implementation.
+  explicit MatcherBase(const MatcherInterface<T>* impl)
+      : impl_(impl) {}
+
+  virtual ~MatcherBase() {}
+
+ private:
+  // shared_ptr (util/gtl/shared_ptr.h) and linked_ptr have similar
+  // interfaces.  The former dynamically allocates a chunk of memory
+  // to hold the reference count, while the latter tracks all
+  // references using a circular linked list without allocating
+  // memory.  It has been observed that linked_ptr performs better in
+  // typical scenarios.  However, shared_ptr can out-perform
+  // linked_ptr when there are many more uses of the copy constructor
+  // than the default constructor.
+  //
+  // If performance becomes a problem, we should see if using
+  // shared_ptr helps.
+  ::testing::internal::linked_ptr<const MatcherInterface<T> > impl_;
+};
+
+}  // namespace internal
+
+// A Matcher<T> is a copyable and IMMUTABLE (except by assignment)
+// object that can check whether a value of type T matches.  The
+// implementation of Matcher<T> is just a linked_ptr to const
+// MatcherInterface<T>, so copying is fairly cheap.  Don't inherit
+// from Matcher!
+template <typename T>
+class Matcher : public internal::MatcherBase<T> {
+ public:
+  // Constructs a null matcher.  Needed for storing Matcher objects in STL
+  // containers.  A default-constructed matcher is not yet initialized.  You
+  // cannot use it until a valid value has been assigned to it.
+  explicit Matcher() {}  // NOLINT
+
+  // Constructs a matcher from its implementation.
+  explicit Matcher(const MatcherInterface<T>* impl)
+      : internal::MatcherBase<T>(impl) {}
+
+  // Implicit constructor here allows people to write
+  // EXPECT_CALL(foo, Bar(5)) instead of EXPECT_CALL(foo, Bar(Eq(5))) sometimes
+  Matcher(T value);  // NOLINT
+};
+
+// The following two specializations allow the user to write str
+// instead of Eq(str) and "foo" instead of Eq("foo") when a string
+// matcher is expected.
+template <>
+class GTEST_API_ Matcher<const internal::string&>
+    : public internal::MatcherBase<const internal::string&> {
+ public:
+  Matcher() {}
+
+  explicit Matcher(const MatcherInterface<const internal::string&>* impl)
+      : internal::MatcherBase<const internal::string&>(impl) {}
+
+  // Allows the user to write str instead of Eq(str) sometimes, where
+  // str is a string object.
+  Matcher(const internal::string& s);  // NOLINT
+
+  // Allows the user to write "foo" instead of Eq("foo") sometimes.
+  Matcher(const char* s);  // NOLINT
+};
+
+template <>
+class GTEST_API_ Matcher<internal::string>
+    : public internal::MatcherBase<internal::string> {
+ public:
+  Matcher() {}
+
+  explicit Matcher(const MatcherInterface<internal::string>* impl)
+      : internal::MatcherBase<internal::string>(impl) {}
+
+  // Allows the user to write str instead of Eq(str) sometimes, where
+  // str is a string object.
+  Matcher(const internal::string& s);  // NOLINT
+
+  // Allows the user to write "foo" instead of Eq("foo") sometimes.
+  Matcher(const char* s);  // NOLINT
+};
+
+#if GTEST_HAS_STRING_PIECE_
+// The following two specializations allow the user to write str
+// instead of Eq(str) and "foo" instead of Eq("foo") when a StringPiece
+// matcher is expected.
+template <>
+class GTEST_API_ Matcher<const StringPiece&>
+    : public internal::MatcherBase<const StringPiece&> {
+ public:
+  Matcher() {}
+
+  explicit Matcher(const MatcherInterface<const StringPiece&>* impl)
+      : internal::MatcherBase<const StringPiece&>(impl) {}
+
+  // Allows the user to write str instead of Eq(str) sometimes, where
+  // str is a string object.
+  Matcher(const internal::string& s);  // NOLINT
+
+  // Allows the user to write "foo" instead of Eq("foo") sometimes.
+  Matcher(const char* s);  // NOLINT
+
+  // Allows the user to pass StringPieces directly.
+  Matcher(StringPiece s);  // NOLINT
+};
+
+template <>
+class GTEST_API_ Matcher<StringPiece>
+    : public internal::MatcherBase<StringPiece> {
+ public:
+  Matcher() {}
+
+  explicit Matcher(const MatcherInterface<StringPiece>* impl)
+      : internal::MatcherBase<StringPiece>(impl) {}
+
+  // Allows the user to write str instead of Eq(str) sometimes, where
+  // str is a string object.
+  Matcher(const internal::string& s);  // NOLINT
+
+  // Allows the user to write "foo" instead of Eq("foo") sometimes.
+  Matcher(const char* s);  // NOLINT
+
+  // Allows the user to pass StringPieces directly.
+  Matcher(StringPiece s);  // NOLINT
+};
+#endif  // GTEST_HAS_STRING_PIECE_
+
+// The PolymorphicMatcher class template makes it easy to implement a
+// polymorphic matcher (i.e. a matcher that can match values of more
+// than one type, e.g. Eq(n) and NotNull()).
+//
+// To define a polymorphic matcher, a user should provide an Impl
+// class that has a DescribeTo() method and a DescribeNegationTo()
+// method, and define a member function (or member function template)
+//
+//   bool MatchAndExplain(const Value& value,
+//                        MatchResultListener* listener) const;
+//
+// See the definition of NotNull() for a complete example.
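+//
+// A minimal illustrative sketch (IsEmptySketch and IsEmptySketchImpl are
+// hypothetical names, not part of this header):
+//
+//   class IsEmptySketchImpl {
+//    public:
+//     template <typename Container>
+//     bool MatchAndExplain(const Container& c,
+//                          MatchResultListener* /* listener */) const {
+//       return c.empty();
+//     }
+//     void DescribeTo(::std::ostream* os) const { *os << "is empty"; }
+//     void DescribeNegationTo(::std::ostream* os) const {
+//       *os << "isn't empty";
+//     }
+//   };
+//
+//   inline PolymorphicMatcher<IsEmptySketchImpl> IsEmptySketch() {
+//     return MakePolymorphicMatcher(IsEmptySketchImpl());
+//   }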
+template <class Impl>
+class PolymorphicMatcher {
+ public:
+  explicit PolymorphicMatcher(const Impl& an_impl) : impl_(an_impl) {}
+
+  // Returns a mutable reference to the underlying matcher
+  // implementation object.
+  Impl& mutable_impl() { return impl_; }
+
+  // Returns an immutable reference to the underlying matcher
+  // implementation object.
+  const Impl& impl() const { return impl_; }
+
+  template <typename T>
+  operator Matcher<T>() const {
+    return Matcher<T>(new MonomorphicImpl<T>(impl_));
+  }
+
+ private:
+  template <typename T>
+  class MonomorphicImpl : public MatcherInterface<T> {
+   public:
+    explicit MonomorphicImpl(const Impl& impl) : impl_(impl) {}
+
+    virtual void DescribeTo(::std::ostream* os) const {
+      impl_.DescribeTo(os);
+    }
+
+    virtual void DescribeNegationTo(::std::ostream* os) const {
+      impl_.DescribeNegationTo(os);
+    }
+
+    virtual bool MatchAndExplain(T x, MatchResultListener* listener) const {
+      return impl_.MatchAndExplain(x, listener);
+    }
+
+   private:
+    const Impl impl_;
+
+    GTEST_DISALLOW_ASSIGN_(MonomorphicImpl);
+  };
+
+  Impl impl_;
+
+  GTEST_DISALLOW_ASSIGN_(PolymorphicMatcher);
+};
+
+// Creates a matcher from its implementation.  This is easier to use
+// than the Matcher<T> constructor as it doesn't require you to
+// explicitly write the template argument, e.g.
+//
+//   MakeMatcher(foo);
+// vs
+//   Matcher<const string&>(foo);
+template <typename T>
+inline Matcher<T> MakeMatcher(const MatcherInterface<T>* impl) {
+  return Matcher<T>(impl);
+}
+
+// Creates a polymorphic matcher from its implementation.  This is
+// easier to use than the PolymorphicMatcher<Impl> constructor as it
+// doesn't require you to explicitly write the template argument, e.g.
+//
+//   MakePolymorphicMatcher(foo);
+// vs
+//   PolymorphicMatcher<TypeOfFoo>(foo);
+template <class Impl>
+inline PolymorphicMatcher<Impl> MakePolymorphicMatcher(const Impl& impl) {
+  return PolymorphicMatcher<Impl>(impl);
+}
+
+// Anything inside the 'internal' namespace IS INTERNAL IMPLEMENTATION
+// and MUST NOT BE USED IN USER CODE!!!
+namespace internal {
+
+// The MatcherCastImpl class template is a helper for implementing
+// MatcherCast().  We need this helper in order to partially
+// specialize the implementation of MatcherCast() (C++ allows
+// class/struct templates to be partially specialized, but not
+// function templates).
+
+// This general version is used when MatcherCast()'s argument is a
+// polymorphic matcher (i.e. something that can be converted to a
+// Matcher but is not one yet; for example, Eq(value)) or a value (for
+// example, "hello").
+template <typename T, typename M>
+class MatcherCastImpl {
+ public:
+  static Matcher<T> Cast(const M& polymorphic_matcher_or_value) {
+    // M can be a polymorphic matcher, in which case we want to use
+    // its conversion operator to create Matcher<T>.  Or it can be a value
+    // that should be passed to the Matcher<T>'s constructor.
+    //
+    // We can't call Matcher<T>(polymorphic_matcher_or_value) when M is a
+    // polymorphic matcher because it'll be ambiguous if T has an implicit
+    // constructor from M (this usually happens when T has an implicit
+    // constructor from any type).
+    //
+    // It won't work to unconditionally implicit_cast
+    // polymorphic_matcher_or_value to Matcher<T> because it won't trigger
+    // a user-defined conversion from M to T if one exists (assuming M is
+    // a value).
+    return CastImpl(
+        polymorphic_matcher_or_value,
+        BooleanConstant<
+            internal::ImplicitlyConvertible<M, Matcher<T> >::value>());
+  }
+
+ private:
+  static Matcher<T> CastImpl(const M& value, BooleanConstant<false>) {
+    // M can't be implicitly converted to Matcher<T>, so M isn't a polymorphic
+    // matcher.  It must be a value then.  Use direct initialization to create
+    // a matcher.
+    return Matcher<T>(ImplicitCast_<T>(value));
+  }
+
+  static Matcher<T> CastImpl(const M& polymorphic_matcher_or_value,
+                             BooleanConstant<true>) {
+    // M is implicitly convertible to Matcher<T>, which means that either
+    // M is a polymorphic matcher or Matcher<T> has an implicit constructor
+    // from M.  In both cases using the implicit conversion will produce a
+    // matcher.
+    //
+    // Even if T has an implicit constructor from M, it won't be called because
+    // creating Matcher<T> would require a chain of two user-defined conversions
+    // (first to create T from M and then to create Matcher<T> from T).
+    return polymorphic_matcher_or_value;
+  }
+};
+
+// This more specialized version is used when MatcherCast()'s argument
+// is already a Matcher.  This only compiles when type T can be
+// statically converted to type U.
+template <typename T, typename U>
+class MatcherCastImpl<T, Matcher<U> > {
+ public:
+  static Matcher<T> Cast(const Matcher<U>& source_matcher) {
+    return Matcher<T>(new Impl(source_matcher));
+  }
+
+ private:
+  class Impl : public MatcherInterface<T> {
+   public:
+    explicit Impl(const Matcher<U>& source_matcher)
+        : source_matcher_(source_matcher) {}
+
+    // We delegate the matching logic to the source matcher.
+    virtual bool MatchAndExplain(T x, MatchResultListener* listener) const {
+      return source_matcher_.MatchAndExplain(static_cast<U>(x), listener);
+    }
+
+    virtual void DescribeTo(::std::ostream* os) const {
+      source_matcher_.DescribeTo(os);
+    }
+
+    virtual void DescribeNegationTo(::std::ostream* os) const {
+      source_matcher_.DescribeNegationTo(os);
+    }
+
+   private:
+    const Matcher<U> source_matcher_;
+
+    GTEST_DISALLOW_ASSIGN_(Impl);
+  };
+};
+
+// This even more specialized version is used for efficiently casting
+// a matcher to its own type.
+template <typename T>
+class MatcherCastImpl<T, Matcher<T> > {
+ public:
+  static Matcher<T> Cast(const Matcher<T>& matcher) { return matcher; }
+};
+
+}  // namespace internal
+
+// In order to be safe and clear, casting between different matcher
+// types is done explicitly via MatcherCast<T>(m), which takes a
+// matcher m and returns a Matcher<T>.  It compiles only when T can be
+// statically converted to the argument type of m.
+template <typename T, typename M>
+inline Matcher<T> MatcherCast(const M& matcher) {
+  return internal::MatcherCastImpl<T, M>::Cast(matcher);
+}
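+
+// For example (illustrative only, assuming Google Mock's Eq() equality
+// matcher factory), a Matcher<int> can be cast to a Matcher<char>
+// because char can be statically converted to int:
+//
+//   Matcher<int> is_five = Eq(5);
+//   Matcher<char> is_char_five = MatcherCast<char>(is_five);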
+
+// Implements SafeMatcherCast().
+//
+// We use an intermediate class to do the actual safe casting as Nokia's
+// Symbian compiler cannot decide between
+// template <T, M> ... (M) and
+// template <T, U> ... (const Matcher<U>&)
+// for function templates but can for member function templates.
+template <typename T>
+class SafeMatcherCastImpl {
+ public:
+  // This overload handles polymorphic matchers and values only since
+  // monomorphic matchers are handled by the next one.
+  template <typename M>
+  static inline Matcher<T> Cast(const M& polymorphic_matcher_or_value) {
+    return internal::MatcherCastImpl<T, M>::Cast(polymorphic_matcher_or_value);
+  }
+
+  // This overload handles monomorphic matchers.
+  //
+  // In general, if type T can be implicitly converted to type U, we can
+  // safely convert a Matcher<U> to a Matcher<T> (i.e. Matcher is
+  // contravariant): just keep a copy of the original Matcher<U>, convert the
+  // argument from type T to U, and then pass it to the underlying Matcher<U>.
+  // The only exception is when U is a reference and T is not, as the
+  // underlying Matcher<U> may be interested in the argument's address, which
+  // is not preserved in the conversion from T to U.
+  template <typename U>
+  static inline Matcher<T> Cast(const Matcher<U>& matcher) {
+    // Enforce that T can be implicitly converted to U.
+    GTEST_COMPILE_ASSERT_((internal::ImplicitlyConvertible<T, U>::value),
+                          T_must_be_implicitly_convertible_to_U);
+    // Enforce that we are not converting a non-reference type T to a reference
+    // type U.
+    GTEST_COMPILE_ASSERT_(
+        internal::is_reference<T>::value || !internal::is_reference<U>::value,
+        cannot_convert_non_reference_arg_to_reference);
+    // In case both T and U are arithmetic types, enforce that the
+    // conversion is not lossy.
+    typedef GTEST_REMOVE_REFERENCE_AND_CONST_(T) RawT;
+    typedef GTEST_REMOVE_REFERENCE_AND_CONST_(U) RawU;
+    const bool kTIsOther = GMOCK_KIND_OF_(RawT) == internal::kOther;
+    const bool kUIsOther = GMOCK_KIND_OF_(RawU) == internal::kOther;
+    GTEST_COMPILE_ASSERT_(
+        kTIsOther || kUIsOther ||
+        (internal::LosslessArithmeticConvertible<RawT, RawU>::value),
+        conversion_of_arithmetic_types_must_be_lossless);
+    return MatcherCast<T>(matcher);
+  }
+};
+
+template <typename T, typename M>
+inline Matcher<T> SafeMatcherCast(const M& polymorphic_matcher) {
+  return SafeMatcherCastImpl<T>::Cast(polymorphic_matcher);
+}
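+
+// For example (illustrative only, assuming Google Mock's Eq() factory):
+//
+//   Matcher<int> is_five = Eq(5);
+//   // OK: char -> int is a lossless arithmetic conversion.
+//   Matcher<char> m1 = SafeMatcherCast<char>(is_five);
+//
+//   Matcher<char> is_a = Eq('a');
+//   // Would not compile: int -> char is a lossy arithmetic conversion,
+//   // so the compile-time assertion above rejects it.
+//   // Matcher<int> m2 = SafeMatcherCast<int>(is_a);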
+
+// A<T>() returns a matcher that matches any value of type T.
+template <typename T>
+Matcher<T> A();
+
+// Anything inside the 'internal' namespace IS INTERNAL IMPLEMENTATION
+// and MUST NOT BE USED IN USER CODE!!!
+namespace internal {
+
+// If the explanation is not empty, prints it to the ostream.
+inline void PrintIfNotEmpty(const internal::string& explanation,
+                            ::std::ostream* os) {
+  if (explanation != "" && os != NULL) {
+    *os << ", " << explanation;
+  }
+}
+
+// Returns true if the given type name is easy to read by a human.
+// This is used to decide whether printing the type of a value might
+// be helpful.
+inline bool IsReadableTypeName(const string& type_name) {
+  // We consider a type name readable if it's short or doesn't contain
+  // a template or function type.
+  return (type_name.length() <= 20 ||
+          type_name.find_first_of("<(") == string::npos);
+}
+
+// Matches the value against the given matcher, prints the value and explains
+// the match result to the listener. Returns the match result.
+// 'listener' must not be NULL.
+// Value cannot be passed by const reference, because some matchers take a
+// non-const argument.
+template <typename Value, typename T>
+bool MatchPrintAndExplain(Value& value, const Matcher<T>& matcher,
+                          MatchResultListener* listener) {
+  if (!listener->IsInterested()) {
+    // If the listener is not interested, we do not need to construct the
+    // inner explanation.
+    return matcher.Matches(value);
+  }
+
+  StringMatchResultListener inner_listener;
+  const bool match = matcher.MatchAndExplain(value, &inner_listener);
+
+  UniversalPrint(value, listener->stream());
+#if GTEST_HAS_RTTI
+  const string& type_name = GetTypeName<Value>();
+  if (IsReadableTypeName(type_name))
+    *listener->stream() << " (of type " << type_name << ")";
+#endif
+  PrintIfNotEmpty(inner_listener.str(), listener->stream());
+
+  return match;
+}
+
+// An internal helper class for doing compile-time loop on a tuple's
+// fields.
+template <size_t N>
+class TuplePrefix {
+ public:
+  // TuplePrefix<N>::Matches(matcher_tuple, value_tuple) returns true
+  // iff the first N fields of matcher_tuple matches the first N
+  // fields of value_tuple, respectively.
+  template <typename MatcherTuple, typename ValueTuple>
+  static bool Matches(const MatcherTuple& matcher_tuple,
+                      const ValueTuple& value_tuple) {
+    return TuplePrefix<N - 1>::Matches(matcher_tuple, value_tuple)
+        && get<N - 1>(matcher_tuple).Matches(get<N - 1>(value_tuple));
+  }
+
+  // TuplePrefix<N>::ExplainMatchFailuresTo(matchers, values, os)
+  // describes failures in matching the first N fields of matchers
+  // against the first N fields of values.  If there is no failure,
+  // nothing will be streamed to os.
+  template <typename MatcherTuple, typename ValueTuple>
+  static void ExplainMatchFailuresTo(const MatcherTuple& matchers,
+                                     const ValueTuple& values,
+                                     ::std::ostream* os) {
+    // First, describes failures in the first N - 1 fields.
+    TuplePrefix<N - 1>::ExplainMatchFailuresTo(matchers, values, os);
+
+    // Then describes the failure (if any) in the (N - 1)-th (0-based)
+    // field.
+    typename tuple_element<N - 1, MatcherTuple>::type matcher =
+        get<N - 1>(matchers);
+    typedef typename tuple_element<N - 1, ValueTuple>::type Value;
+    Value value = get<N - 1>(values);
+    StringMatchResultListener listener;
+    if (!matcher.MatchAndExplain(value, &listener)) {
+      // TODO(wan): include in the message the name of the parameter
+      // as used in MOCK_METHOD*() when possible.
+      *os << "  Expected arg #" << N - 1 << ": ";
+      get<N - 1>(matchers).DescribeTo(os);
+      *os << "\n           Actual: ";
+      // We remove the reference in type Value to prevent the
+      // universal printer from printing the address of value, which
+      // isn't interesting to the user most of the time.  The
+      // matcher's MatchAndExplain() method handles the case when
+      // the address is interesting.
+      internal::UniversalPrint(value, os);
+      PrintIfNotEmpty(listener.str(), os);
+      *os << "\n";
+    }
+  }
+};
+
+// The base case.
+template <>
+class TuplePrefix<0> {
+ public:
+  template <typename MatcherTuple, typename ValueTuple>
+  static bool Matches(const MatcherTuple& /* matcher_tuple */,
+                      const ValueTuple& /* value_tuple */) {
+    return true;
+  }
+
+  template <typename MatcherTuple, typename ValueTuple>
+  static void ExplainMatchFailuresTo(const MatcherTuple& /* matchers */,
+                                     const ValueTuple& /* values */,
+                                     ::std::ostream* /* os */) {}
+};
+
+// TupleMatches(matcher_tuple, value_tuple) returns true iff all
+// matchers in matcher_tuple match the corresponding fields in
+// value_tuple.  It is a compiler error if matcher_tuple and
+// value_tuple have different numbers of fields or incompatible field
+// types.
+template <typename MatcherTuple, typename ValueTuple>
+bool TupleMatches(const MatcherTuple& matcher_tuple,
+                  const ValueTuple& value_tuple) {
+  // Makes sure that matcher_tuple and value_tuple have the same
+  // number of fields.
+  GTEST_COMPILE_ASSERT_(tuple_size<MatcherTuple>::value ==
+                        tuple_size<ValueTuple>::value,
+                        matcher_and_value_have_different_numbers_of_fields);
+  return TuplePrefix<tuple_size<ValueTuple>::value>::
+      Matches(matcher_tuple, value_tuple);
+}
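+
+// For example (illustrative only, assuming Google Mock's Eq() and Gt()
+// factories):
+//
+//   ::testing::tuple<Matcher<int>, Matcher<int> > matchers(Eq(1), Gt(2));
+//   ::testing::tuple<int, int> values(1, 5);
+//   bool all_match = TupleMatches(matchers, values);  // true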
+
+// Describes failures in matching matchers against values.  If there
+// is no failure, nothing will be streamed to os.
+template <typename MatcherTuple, typename ValueTuple>
+void ExplainMatchFailureTupleTo(const MatcherTuple& matchers,
+                                const ValueTuple& values,
+                                ::std::ostream* os) {
+  TuplePrefix<tuple_size<MatcherTuple>::value>::ExplainMatchFailuresTo(
+      matchers, values, os);
+}
+
+// TransformTupleValues and its helper.
+//
+// TransformTupleValuesHelper hides the internal machinery that
+// TransformTupleValues uses to implement a tuple traversal.
+template <typename Tuple, typename Func, typename OutIter>
+class TransformTupleValuesHelper {
+ private:
+  typedef ::testing::tuple_size<Tuple> TupleSize;
+
+ public:
+  // For each member of tuple 't', taken in order, evaluates
+  // '*out++ = f(member)'.  Returns the final value of 'out' in case the
+  // caller needs it.
+  static OutIter Run(Func f, const Tuple& t, OutIter out) {
+    return IterateOverTuple<Tuple, TupleSize::value>()(f, t, out);
+  }
+
+ private:
+  template <typename Tup, size_t kRemainingSize>
+  struct IterateOverTuple {
+    OutIter operator() (Func f, const Tup& t, OutIter out) const {
+      *out++ = f(::testing::get<TupleSize::value - kRemainingSize>(t));
+      return IterateOverTuple<Tup, kRemainingSize - 1>()(f, t, out);
+    }
+  };
+  template <typename Tup>
+  struct IterateOverTuple<Tup, 0> {
+    OutIter operator() (Func /* f */, const Tup& /* t */, OutIter out) const {
+      return out;
+    }
+  };
+};
+
+// Successively invokes 'f(element)' on each element of the tuple 't',
+// appending each result to the 'out' iterator. Returns the final value
+// of 'out'.
+template <typename Tuple, typename Func, typename OutIter>
+OutIter TransformTupleValues(Func f, const Tuple& t, OutIter out) {
+  return TransformTupleValuesHelper<Tuple, Func, OutIter>::Run(f, t, out);
+}
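+
+// For example (illustrative only; Doubler is a hypothetical functor):
+//
+//   struct Doubler {
+//     int operator()(int n) const { return 2 * n; }
+//   };
+//
+//   ::testing::tuple<int, int, int> t(1, 2, 3);
+//   ::std::vector<int> doubled;
+//   TransformTupleValues(Doubler(), t, ::std::back_inserter(doubled));
+//   // 'doubled' now holds 2, 4, 6.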
+
+// Implements A<T>().
+template <typename T>
+class AnyMatcherImpl : public MatcherInterface<T> {
+ public:
+  virtual bool MatchAndExplain(
+      T /* x */, MatchResultListener* /* listener */) const { return true; }
+  virtual void DescribeTo(::std::ostream* os) const { *os << "is anything"; }
+  virtual void DescribeNegationTo(::std::ostream* os) const {
+    // This is mostly for completeness' sake, as it's not very useful
+    // to write Not(A<bool>()).  However we cannot completely rule out
+    // such a possibility, and it doesn't hurt to be prepared.
+    *os << "never matches";
+  }
+};
+
+// Implements _, a matcher that matches any value of any
+// type.  This is a polymorphic matcher, so we need a template type
+// conversion operator to make it appear as a Matcher<T> for any
+// type T.
+class AnythingMatcher {
+ public:
+  template <typename T>
+  operator Matcher<T>() const { return A<T>(); }
+};
+
+// Implements a matcher that compares a given value with a
+// pre-supplied value using one of the ==, <=, <, etc, operators.  The
+// two values being compared don't have to have the same type.
+//
+// The matcher defined here is polymorphic (for example, Eq(5) can be
+// used to match an int, a short, a double, etc).  Therefore we use
+// a template type conversion operator in the implementation.
+//
+// The following template definition assumes that the Rhs parameter is
+// a "bare" type (i.e. neither 'const T' nor 'T&').
+template <typename D, typename Rhs, typename Op>
+class ComparisonBase {
+ public:
+  explicit ComparisonBase(const Rhs& rhs) : rhs_(rhs) {}
+  template <typename Lhs>
+  operator Matcher<Lhs>() const {
+    return MakeMatcher(new Impl<Lhs>(rhs_));
+  }
+
+ private:
+  template <typename Lhs>
+  class Impl : public MatcherInterface<Lhs> {
+   public:
+    explicit Impl(const Rhs& rhs) : rhs_(rhs) {}
+    virtual bool MatchAndExplain(
+        Lhs lhs, MatchResultListener* /* listener */) const {
+      return Op()(lhs, rhs_);
+    }
+    virtual void DescribeTo(::std::ostream* os) const {
+      *os << D::Desc() << " ";
+      UniversalPrint(rhs_, os);
+    }
+    virtual void DescribeNegationTo(::std::ostream* os) const {
+      *os << D::NegatedDesc() <<  " ";
+      UniversalPrint(rhs_, os);
+    }
+   private:
+    Rhs rhs_;
+    GTEST_DISALLOW_ASSIGN_(Impl);
+  };
+  Rhs rhs_;
+  GTEST_DISALLOW_ASSIGN_(ComparisonBase);
+};
+
+template <typename Rhs>
+class EqMatcher : public ComparisonBase<EqMatcher<Rhs>, Rhs, AnyEq> {
+ public:
+  explicit EqMatcher(const Rhs& rhs)
+      : ComparisonBase<EqMatcher<Rhs>, Rhs, AnyEq>(rhs) { }
+  static const char* Desc() { return "is equal to"; }
+  static const char* NegatedDesc() { return "isn't equal to"; }
+};
+template <typename Rhs>
+class NeMatcher : public ComparisonBase<NeMatcher<Rhs>, Rhs, AnyNe> {
+ public:
+  explicit NeMatcher(const Rhs& rhs)
+      : ComparisonBase<NeMatcher<Rhs>, Rhs, AnyNe>(rhs) { }
+  static const char* Desc() { return "isn't equal to"; }
+  static const char* NegatedDesc() { return "is equal to"; }
+};
+template <typename Rhs>
+class LtMatcher : public ComparisonBase<LtMatcher<Rhs>, Rhs, AnyLt> {
+ public:
+  explicit LtMatcher(const Rhs& rhs)
+      : ComparisonBase<LtMatcher<Rhs>, Rhs, AnyLt>(rhs) { }
+  static const char* Desc() { return "is <"; }
+  static const char* NegatedDesc() { return "isn't <"; }
+};
+template <typename Rhs>
+class GtMatcher : public ComparisonBase<GtMatcher<Rhs>, Rhs, AnyGt> {
+ public:
+  explicit GtMatcher(const Rhs& rhs)
+      : ComparisonBase<GtMatcher<Rhs>, Rhs, AnyGt>(rhs) { }
+  static const char* Desc() { return "is >"; }
+  static const char* NegatedDesc() { return "isn't >"; }
+};
+template <typename Rhs>
+class LeMatcher : public ComparisonBase<LeMatcher<Rhs>, Rhs, AnyLe> {
+ public:
+  explicit LeMatcher(const Rhs& rhs)
+      : ComparisonBase<LeMatcher<Rhs>, Rhs, AnyLe>(rhs) { }
+  static const char* Desc() { return "is <="; }
+  static const char* NegatedDesc() { return "isn't <="; }
+};
+template <typename Rhs>
+class GeMatcher : public ComparisonBase<GeMatcher<Rhs>, Rhs, AnyGe> {
+ public:
+  explicit GeMatcher(const Rhs& rhs)
+      : ComparisonBase<GeMatcher<Rhs>, Rhs, AnyGe>(rhs) { }
+  static const char* Desc() { return "is >="; }
+  static const char* NegatedDesc() { return "isn't >="; }
+};
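+
+// For illustration only, a single comparison matcher object converts to
+// matchers for several argument types:
+//
+//   EqMatcher<int> eq_five(5);
+//   Matcher<int> m1 = eq_five;     // compares int arguments with 5
+//   Matcher<double> m2 = eq_five;  // compares double arguments with 5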
+
+// Implements the polymorphic IsNull() matcher, which matches any raw or smart
+// pointer that is NULL.
+class IsNullMatcher {
+ public:
+  template <typename Pointer>
+  bool MatchAndExplain(const Pointer& p,
+                       MatchResultListener* /* listener */) const {
+#if GTEST_LANG_CXX11
+    return p == nullptr;
+#else  // GTEST_LANG_CXX11
+    return GetRawPointer(p) == NULL;
+#endif  // GTEST_LANG_CXX11
+  }
+
+  void DescribeTo(::std::ostream* os) const { *os << "is NULL"; }
+  void DescribeNegationTo(::std::ostream* os) const {
+    *os << "isn't NULL";
+  }
+};
+
+// Implements the polymorphic NotNull() matcher, which matches any raw or smart
+// pointer that is not NULL.
+class NotNullMatcher {
+ public:
+  template <typename Pointer>
+  bool MatchAndExplain(const Pointer& p,
+                       MatchResultListener* /* listener */) const {
+#if GTEST_LANG_CXX11
+    return p != nullptr;
+#else  // GTEST_LANG_CXX11
+    return GetRawPointer(p) != NULL;
+#endif  // GTEST_LANG_CXX11
+  }
+
+  void DescribeTo(::std::ostream* os) const { *os << "isn't NULL"; }
+  void DescribeNegationTo(::std::ostream* os) const {
+    *os << "is NULL";
+  }
+};
+
+// Ref(variable) matches any argument that is a reference to
+// 'variable'.  This matcher is polymorphic as it can match any
+// super type of the type of 'variable'.
+//
+// The RefMatcher template class implements Ref(variable).  It can
+// only be instantiated with a reference type.  This prevents a user
+// from mistakenly using Ref(x) to match a non-reference function
+// argument.  For example, the following will righteously cause a
+// compiler error:
+//
+//   int n;
+//   Matcher<int> m1 = Ref(n);   // This won't compile.
+//   Matcher<int&> m2 = Ref(n);  // This will compile.
+template <typename T>
+class RefMatcher;
+
+template <typename T>
+class RefMatcher<T&> {
+  // Google Mock is a generic framework and thus needs to support
+  // mocking any function types, including those that take non-const
+  // reference arguments.  Therefore the template parameter T (and
+  // Super below) can be instantiated to either a const type or a
+  // non-const type.
+ public:
+  // RefMatcher() takes a T& instead of const T&, as we want the
+  // compiler to catch using Ref(const_value) as a matcher for a
+  // non-const reference.
+  explicit RefMatcher(T& x) : object_(x) {}  // NOLINT
+
+  template <typename Super>
+  operator Matcher<Super&>() const {
+    // By passing object_ (type T&) to Impl(), which expects a Super&,
+    // we make sure that Super is a super type of T.  In particular,
+    // this catches using Ref(const_value) as a matcher for a
+    // non-const reference, as you cannot implicitly convert a const
+    // reference to a non-const reference.
+    return MakeMatcher(new Impl<Super>(object_));
+  }
+
+ private:
+  template <typename Super>
+  class Impl : public MatcherInterface<Super&> {
+   public:
+    explicit Impl(Super& x) : object_(x) {}  // NOLINT
+
+    // MatchAndExplain() takes a Super& (as opposed to const Super&)
+    // in order to match the interface MatcherInterface<Super&>.
+    virtual bool MatchAndExplain(
+        Super& x, MatchResultListener* listener) const {
+      *listener << "which is located @" << static_cast<const void*>(&x);
+      return &x == &object_;
+    }
+
+    virtual void DescribeTo(::std::ostream* os) const {
+      *os << "references the variable ";
+      UniversalPrinter<Super&>::Print(object_, os);
+    }
+
+    virtual void DescribeNegationTo(::std::ostream* os) const {
+      *os << "does not reference the variable ";
+      UniversalPrinter<Super&>::Print(object_, os);
+    }
+
+   private:
+    const Super& object_;
+
+    GTEST_DISALLOW_ASSIGN_(Impl);
+  };
+
+  T& object_;
+
+  GTEST_DISALLOW_ASSIGN_(RefMatcher);
+};
+
+// Polymorphic helper functions for narrow and wide string matchers.
+inline bool CaseInsensitiveCStringEquals(const char* lhs, const char* rhs) {
+  return String::CaseInsensitiveCStringEquals(lhs, rhs);
+}
+
+inline bool CaseInsensitiveCStringEquals(const wchar_t* lhs,
+                                         const wchar_t* rhs) {
+  return String::CaseInsensitiveWideCStringEquals(lhs, rhs);
+}
+
+// String comparison for narrow or wide strings that can have embedded NUL
+// characters.
+template <typename StringType>
+bool CaseInsensitiveStringEquals(const StringType& s1,
+                                 const StringType& s2) {
+  // Are the heads equal?
+  if (!CaseInsensitiveCStringEquals(s1.c_str(), s2.c_str())) {
+    return false;
+  }
+
+  // Skip the equal heads.
+  const typename StringType::value_type nul = 0;
+  const size_t i1 = s1.find(nul), i2 = s2.find(nul);
+
+  // Are we at the end of either s1 or s2?
+  if (i1 == StringType::npos || i2 == StringType::npos) {
+    return i1 == i2;
+  }
+
+  // Are the tails equal?
+  return CaseInsensitiveStringEquals(s1.substr(i1 + 1), s2.substr(i2 + 1));
+}
+
+// String matchers.
+
+// Implements equality-based string matchers like StrEq, StrCaseNe, etc.
+template <typename StringType>
+class StrEqualityMatcher {
+ public:
+  StrEqualityMatcher(const StringType& str, bool expect_eq,
+                     bool case_sensitive)
+      : string_(str), expect_eq_(expect_eq), case_sensitive_(case_sensitive) {}
+
+  // Accepts pointer types, particularly:
+  //   const char*
+  //   char*
+  //   const wchar_t*
+  //   wchar_t*
+  template <typename CharType>
+  bool MatchAndExplain(CharType* s, MatchResultListener* listener) const {
+    if (s == NULL) {
+      return !expect_eq_;
+    }
+    return MatchAndExplain(StringType(s), listener);
+  }
+
+  // Matches anything that can convert to StringType.
+  //
+  // This is a template, not just a plain function with const StringType&,
+  // because StringPiece has some interfering non-explicit constructors.
+  template <typename MatcheeStringType>
+  bool MatchAndExplain(const MatcheeStringType& s,
+                       MatchResultListener* /* listener */) const {
+    const StringType& s2(s);
+    const bool eq = case_sensitive_ ? s2 == string_ :
+        CaseInsensitiveStringEquals(s2, string_);
+    return expect_eq_ == eq;
+  }
+
+  void DescribeTo(::std::ostream* os) const {
+    DescribeToHelper(expect_eq_, os);
+  }
+
+  void DescribeNegationTo(::std::ostream* os) const {
+    DescribeToHelper(!expect_eq_, os);
+  }
+
+ private:
+  void DescribeToHelper(bool expect_eq, ::std::ostream* os) const {
+    *os << (expect_eq ? "is " : "isn't ");
+    *os << "equal to ";
+    if (!case_sensitive_) {
+      *os << "(ignoring case) ";
+    }
+    UniversalPrint(string_, os);
+  }
+
+  const StringType string_;
+  const bool expect_eq_;
+  const bool case_sensitive_;
+
+  GTEST_DISALLOW_ASSIGN_(StrEqualityMatcher);
+};
+
+// Implements the polymorphic HasSubstr(substring) matcher, which
+// can be used as a Matcher<T> as long as T can be converted to a
+// string.
+template <typename StringType>
+class HasSubstrMatcher {
+ public:
+  explicit HasSubstrMatcher(const StringType& substring)
+      : substring_(substring) {}
+
+  // Accepts pointer types, particularly:
+  //   const char*
+  //   char*
+  //   const wchar_t*
+  //   wchar_t*
+  template <typename CharType>
+  bool MatchAndExplain(CharType* s, MatchResultListener* listener) const {
+    return s != NULL && MatchAndExplain(StringType(s), listener);
+  }
+
+  // Matches anything that can convert to StringType.
+  //
+  // This is a template, not just a plain function with const StringType&,
+  // because StringPiece has some interfering non-explicit constructors.
+  template <typename MatcheeStringType>
+  bool MatchAndExplain(const MatcheeStringType& s,
+                       MatchResultListener* /* listener */) const {
+    const StringType& s2(s);
+    return s2.find(substring_) != StringType::npos;
+  }
+
+  // Describes what this matcher matches.
+  void DescribeTo(::std::ostream* os) const {
+    *os << "has substring ";
+    UniversalPrint(substring_, os);
+  }
+
+  void DescribeNegationTo(::std::ostream* os) const {
+    *os << "has no substring ";
+    UniversalPrint(substring_, os);
+  }
+
+ private:
+  const StringType substring_;
+
+  GTEST_DISALLOW_ASSIGN_(HasSubstrMatcher);
+};
+
+// Implements the polymorphic StartsWith(substring) matcher, which
+// can be used as a Matcher<T> as long as T can be converted to a
+// string.
+template <typename StringType>
+class StartsWithMatcher {
+ public:
+  explicit StartsWithMatcher(const StringType& prefix) : prefix_(prefix) {
+  }
+
+  // Accepts pointer types, particularly:
+  //   const char*
+  //   char*
+  //   const wchar_t*
+  //   wchar_t*
+  template <typename CharType>
+  bool MatchAndExplain(CharType* s, MatchResultListener* listener) const {
+    return s != NULL && MatchAndExplain(StringType(s), listener);
+  }
+
+  // Matches anything that can convert to StringType.
+  //
+  // This is a template, not just a plain function with const StringType&,
+  // because StringPiece has some interfering non-explicit constructors.
+  template <typename MatcheeStringType>
+  bool MatchAndExplain(const MatcheeStringType& s,
+                       MatchResultListener* /* listener */) const {
+    const StringType& s2(s);
+    return s2.length() >= prefix_.length() &&
+        s2.substr(0, prefix_.length()) == prefix_;
+  }
+
+  void DescribeTo(::std::ostream* os) const {
+    *os << "starts with ";
+    UniversalPrint(prefix_, os);
+  }
+
+  void DescribeNegationTo(::std::ostream* os) const {
+    *os << "doesn't start with ";
+    UniversalPrint(prefix_, os);
+  }
+
+ private:
+  const StringType prefix_;
+
+  GTEST_DISALLOW_ASSIGN_(StartsWithMatcher);
+};
+
+// Implements the polymorphic EndsWith(substring) matcher, which
+// can be used as a Matcher<T> as long as T can be converted to a
+// string.
+template <typename StringType>
+class EndsWithMatcher {
+ public:
+  explicit EndsWithMatcher(const StringType& suffix) : suffix_(suffix) {}
+
+  // Accepts pointer types, particularly:
+  //   const char*
+  //   char*
+  //   const wchar_t*
+  //   wchar_t*
+  template <typename CharType>
+  bool MatchAndExplain(CharType* s, MatchResultListener* listener) const {
+    return s != NULL && MatchAndExplain(StringType(s), listener);
+  }
+
+  // Matches anything that can convert to StringType.
+  //
+  // This is a template, not just a plain function with const StringType&,
+  // because StringPiece has some interfering non-explicit constructors.
+  template <typename MatcheeStringType>
+  bool MatchAndExplain(const MatcheeStringType& s,
+                       MatchResultListener* /* listener */) const {
+    const StringType& s2(s);
+    return s2.length() >= suffix_.length() &&
+        s2.substr(s2.length() - suffix_.length()) == suffix_;
+  }
+
+  void DescribeTo(::std::ostream* os) const {
+    *os << "ends with ";
+    UniversalPrint(suffix_, os);
+  }
+
+  void DescribeNegationTo(::std::ostream* os) const {
+    *os << "doesn't end with ";
+    UniversalPrint(suffix_, os);
+  }
+
+ private:
+  const StringType suffix_;
+
+  GTEST_DISALLOW_ASSIGN_(EndsWithMatcher);
+};
+
+// Implements polymorphic matchers MatchesRegex(regex) and
+// ContainsRegex(regex), which can be used as a Matcher<T> as long as
+// T can be converted to a string.
+class MatchesRegexMatcher {
+ public:
+  MatchesRegexMatcher(const RE* regex, bool full_match)
+      : regex_(regex), full_match_(full_match) {}
+
+  // Accepts pointer types, particularly:
+  //   const char*
+  //   char*
+  //   const wchar_t*
+  //   wchar_t*
+  template <typename CharType>
+  bool MatchAndExplain(CharType* s, MatchResultListener* listener) const {
+    return s != NULL && MatchAndExplain(internal::string(s), listener);
+  }
+
+  // Matches anything that can convert to internal::string.
+  //
+  // This is a template, not just a plain function with const internal::string&,
+  // because StringPiece has some interfering non-explicit constructors.
+  template <class MatcheeStringType>
+  bool MatchAndExplain(const MatcheeStringType& s,
+                       MatchResultListener* /* listener */) const {
+    const internal::string& s2(s);
+    return full_match_ ? RE::FullMatch(s2, *regex_) :
+        RE::PartialMatch(s2, *regex_);
+  }
+
+  void DescribeTo(::std::ostream* os) const {
+    *os << (full_match_ ? "matches" : "contains")
+        << " regular expression ";
+    UniversalPrinter<internal::string>::Print(regex_->pattern(), os);
+  }
+
+  void DescribeNegationTo(::std::ostream* os) const {
+    *os << "doesn't " << (full_match_ ? "match" : "contain")
+        << " regular expression ";
+    UniversalPrinter<internal::string>::Print(regex_->pattern(), os);
+  }
+
+ private:
+  const internal::linked_ptr<const RE> regex_;
+  const bool full_match_;
+
+  GTEST_DISALLOW_ASSIGN_(MatchesRegexMatcher);
+};
+
+// Implements a matcher that compares the two fields of a 2-tuple
+// using one of the ==, <=, <, etc, operators.  The two fields being
+// compared don't have to have the same type.
+//
+// The matcher defined here is polymorphic (for example, Eq() can be
+// used to match a tuple<int, short>, a tuple<const long&, double>,
+// etc).  Therefore we use a template type conversion operator in the
+// implementation.
+template <typename D, typename Op>
+class PairMatchBase {
+ public:
+  template <typename T1, typename T2>
+  operator Matcher< ::testing::tuple<T1, T2> >() const {
+    return MakeMatcher(new Impl< ::testing::tuple<T1, T2> >);
+  }
+  template <typename T1, typename T2>
+  operator Matcher<const ::testing::tuple<T1, T2>&>() const {
+    return MakeMatcher(new Impl<const ::testing::tuple<T1, T2>&>);
+  }
+
+ private:
+  static ::std::ostream& GetDesc(::std::ostream& os) {  // NOLINT
+    return os << D::Desc();
+  }
+
+  template <typename Tuple>
+  class Impl : public MatcherInterface<Tuple> {
+   public:
+    virtual bool MatchAndExplain(
+        Tuple args,
+        MatchResultListener* /* listener */) const {
+      return Op()(::testing::get<0>(args), ::testing::get<1>(args));
+    }
+    virtual void DescribeTo(::std::ostream* os) const {
+      *os << "are " << GetDesc;
+    }
+    virtual void DescribeNegationTo(::std::ostream* os) const {
+      *os << "aren't " << GetDesc;
+    }
+  };
+};
+
+class Eq2Matcher : public PairMatchBase<Eq2Matcher, AnyEq> {
+ public:
+  static const char* Desc() { return "an equal pair"; }
+};
+class Ne2Matcher : public PairMatchBase<Ne2Matcher, AnyNe> {
+ public:
+  static const char* Desc() { return "an unequal pair"; }
+};
+class Lt2Matcher : public PairMatchBase<Lt2Matcher, AnyLt> {
+ public:
+  static const char* Desc() { return "a pair where the first < the second"; }
+};
+class Gt2Matcher : public PairMatchBase<Gt2Matcher, AnyGt> {
+ public:
+  static const char* Desc() { return "a pair where the first > the second"; }
+};
+class Le2Matcher : public PairMatchBase<Le2Matcher, AnyLe> {
+ public:
+  static const char* Desc() { return "a pair where the first <= the second"; }
+};
+class Ge2Matcher : public PairMatchBase<Ge2Matcher, AnyGe> {
+ public:
+  static const char* Desc() { return "a pair where the first >= the second"; }
+};
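+
+// For illustration only, such a 2-tuple matcher converts to a Matcher
+// for any tuple whose fields support the corresponding operator:
+//
+//   Matcher< ::testing::tuple<int, int> > both_equal = Eq2Matcher();
+//   // both_equal matches the tuple (3, 3) but not (3, 4).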
+
+// Implements the Not(...) matcher for a particular argument type T.
+// We do not nest it inside the NotMatcher class template, as that
+// will prevent different instantiations of NotMatcher from sharing
+// the same NotMatcherImpl<T> class.
+template <typename T>
+class NotMatcherImpl : public MatcherInterface<T> {
+ public:
+  explicit NotMatcherImpl(const Matcher<T>& matcher)
+      : matcher_(matcher) {}
+
+  virtual bool MatchAndExplain(T x, MatchResultListener* listener) const {
+    return !matcher_.MatchAndExplain(x, listener);
+  }
+
+  virtual void DescribeTo(::std::ostream* os) const {
+    matcher_.DescribeNegationTo(os);
+  }
+
+  virtual void DescribeNegationTo(::std::ostream* os) const {
+    matcher_.DescribeTo(os);
+  }
+
+ private:
+  const Matcher<T> matcher_;
+
+  GTEST_DISALLOW_ASSIGN_(NotMatcherImpl);
+};
+
+// Implements the Not(m) matcher, which matches a value that doesn't
+// match matcher m.
+template <typename InnerMatcher>
+class NotMatcher {
+ public:
+  explicit NotMatcher(InnerMatcher matcher) : matcher_(matcher) {}
+
+  // This template type conversion operator allows Not(m) to be used
+  // to match any type m can match.
+  template <typename T>
+  operator Matcher<T>() const {
+    return Matcher<T>(new NotMatcherImpl<T>(SafeMatcherCast<T>(matcher_)));
+  }
+
+ private:
+  InnerMatcher matcher_;
+
+  GTEST_DISALLOW_ASSIGN_(NotMatcher);
+};
+
+// Implements the AllOf(m1, m2) matcher for a particular argument type
+// T. We do not nest it inside the BothOfMatcher class template, as
+// that will prevent different instantiations of BothOfMatcher from
+// sharing the same BothOfMatcherImpl<T> class.
+template <typename T>
+class BothOfMatcherImpl : public MatcherInterface<T> {
+ public:
+  BothOfMatcherImpl(const Matcher<T>& matcher1, const Matcher<T>& matcher2)
+      : matcher1_(matcher1), matcher2_(matcher2) {}
+
+  virtual void DescribeTo(::std::ostream* os) const {
+    *os << "(";
+    matcher1_.DescribeTo(os);
+    *os << ") and (";
+    matcher2_.DescribeTo(os);
+    *os << ")";
+  }
+
+  virtual void DescribeNegationTo(::std::ostream* os) const {
+    *os << "(";
+    matcher1_.DescribeNegationTo(os);
+    *os << ") or (";
+    matcher2_.DescribeNegationTo(os);
+    *os << ")";
+  }
+
+  virtual bool MatchAndExplain(T x, MatchResultListener* listener) const {
+    // If either matcher1_ or matcher2_ doesn't match x, we only need
+    // to explain why one of them fails.
+    StringMatchResultListener listener1;
+    if (!matcher1_.MatchAndExplain(x, &listener1)) {
+      *listener << listener1.str();
+      return false;
+    }
+
+    StringMatchResultListener listener2;
+    if (!matcher2_.MatchAndExplain(x, &listener2)) {
+      *listener << listener2.str();
+      return false;
+    }
+
+    // Otherwise we need to explain why *both* of them match.
+    const internal::string s1 = listener1.str();
+    const internal::string s2 = listener2.str();
+
+    if (s1 == "") {
+      *listener << s2;
+    } else {
+      *listener << s1;
+      if (s2 != "") {
+        *listener << ", and " << s2;
+      }
+    }
+    return true;
+  }
+
+ private:
+  const Matcher<T> matcher1_;
+  const Matcher<T> matcher2_;
+
+  GTEST_DISALLOW_ASSIGN_(BothOfMatcherImpl);
+};
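+
+// For example (illustrative only, assuming Google Mock's AllOf(), Gt()
+// and Lt() factories), a two-argument AllOf() is backed by this class:
+//
+//   Matcher<int> in_range = AllOf(Gt(5), Lt(10));  // matches 6..9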
+
+#if GTEST_LANG_CXX11
+// MatcherList provides mechanisms for storing a variable number of matchers in
+// a list structure (ListType) and creating a combining matcher from such a
+// list.
+// The template is defined recursively using the following template parameters:
+//   * kSize is the length of the MatcherList.
+//   * Head is the type of the first matcher of the list.
+//   * Tail denotes the types of the remaining matchers of the list.
+template <int kSize, typename Head, typename... Tail>
+struct MatcherList {
+  typedef MatcherList<kSize - 1, Tail...> MatcherListTail;
+  typedef ::std::pair<Head, typename MatcherListTail::ListType> ListType;
+
+  // BuildList stores variadic type values in a nested pair structure.
+  // Example:
+  // MatcherList<3, int, string, float>::BuildList(5, "foo", 2.0) will return
+  // the corresponding result of type pair<int, pair<string, float>>.
+  static ListType BuildList(const Head& matcher, const Tail&... tail) {
+    return ListType(matcher, MatcherListTail::BuildList(tail...));
+  }
+
+  // CreateMatcher<T> creates a Matcher<T> from a given list of matchers (built
+  // by BuildList()). CombiningMatcher<T> is used to combine the matchers of the
+  // list. CombiningMatcher<T> must implement MatcherInterface<T> and have a
+  // constructor taking two Matcher<T>s as input.
+  template <typename T, template <typename /* T */> class CombiningMatcher>
+  static Matcher<T> CreateMatcher(const ListType& matchers) {
+    return Matcher<T>(new CombiningMatcher<T>(
+        SafeMatcherCast<T>(matchers.first),
+        MatcherListTail::template CreateMatcher<T, CombiningMatcher>(
+            matchers.second)));
+  }
+};
+
+// The following defines the base case for the recursive definition of
+// MatcherList.
+template <typename Matcher1, typename Matcher2>
+struct MatcherList<2, Matcher1, Matcher2> {
+  typedef ::std::pair<Matcher1, Matcher2> ListType;
+
+  static ListType BuildList(const Matcher1& matcher1,
+                            const Matcher2& matcher2) {
+    return ::std::pair<Matcher1, Matcher2>(matcher1, matcher2);
+  }
+
+  template <typename T, template <typename /* T */> class CombiningMatcher>
+  static Matcher<T> CreateMatcher(const ListType& matchers) {
+    return Matcher<T>(new CombiningMatcher<T>(
+        SafeMatcherCast<T>(matchers.first),
+        SafeMatcherCast<T>(matchers.second)));
+  }
+};
+
+// VariadicMatcher is used for the variadic implementation of
+// AllOf(m_1, m_2, ...) and AnyOf(m_1, m_2, ...).
+// CombiningMatcher<T> is used to recursively combine the provided matchers
+// (of type Args...).
+template <template <typename T> class CombiningMatcher, typename... Args>
+class VariadicMatcher {
+ public:
+  VariadicMatcher(const Args&... matchers)  // NOLINT
+      : matchers_(MatcherListType::BuildList(matchers...)) {}
+
+  // This template type conversion operator allows a
+  // VariadicMatcher<Matcher1, Matcher2, ...> object to match any type that
+  // all of the provided matchers (Matcher1, Matcher2, ...) can match.
+  template <typename T>
+  operator Matcher<T>() const {
+    return MatcherListType::template CreateMatcher<T, CombiningMatcher>(
+        matchers_);
+  }
+
+ private:
+  typedef MatcherList<sizeof...(Args), Args...> MatcherListType;
+
+  const typename MatcherListType::ListType matchers_;
+
+  GTEST_DISALLOW_ASSIGN_(VariadicMatcher);
+};
+
+template <typename... Args>
+using AllOfMatcher = VariadicMatcher<BothOfMatcherImpl, Args...>;
+
+#endif  // GTEST_LANG_CXX11
+
+// Used for implementing the AllOf(m_1, ..., m_n) matcher, which
+// matches a value that matches all of the matchers m_1, ..., and m_n.
+template <typename Matcher1, typename Matcher2>
+class BothOfMatcher {
+ public:
+  BothOfMatcher(Matcher1 matcher1, Matcher2 matcher2)
+      : matcher1_(matcher1), matcher2_(matcher2) {}
+
+  // This template type conversion operator allows a
+  // BothOfMatcher<Matcher1, Matcher2> object to match any type that
+  // both Matcher1 and Matcher2 can match.
+  template <typename T>
+  operator Matcher<T>() const {
+    return Matcher<T>(new BothOfMatcherImpl<T>(SafeMatcherCast<T>(matcher1_),
+                                               SafeMatcherCast<T>(matcher2_)));
+  }
+
+ private:
+  Matcher1 matcher1_;
+  Matcher2 matcher2_;
+
+  GTEST_DISALLOW_ASSIGN_(BothOfMatcher);
+};
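+
+// For example, assuming the user-facing AllOf(), Gt() and Lt() wrappers
+// provided by Google Mock, a conjunction of matchers might be written as:
+//
+//   EXPECT_THAT(7, AllOf(Gt(5), Lt(10)));  // Matches iff 7 > 5 and 7 < 10.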
+
+// Implements the AnyOf(m1, m2) matcher for a particular argument type
+// T.  We do not nest it inside the AnyOfMatcher class template, as
+// that will prevent different instantiations of AnyOfMatcher from
+// sharing the same EitherOfMatcherImpl<T> class.
+template <typename T>
+class EitherOfMatcherImpl : public MatcherInterface<T> {
+ public:
+  EitherOfMatcherImpl(const Matcher<T>& matcher1, const Matcher<T>& matcher2)
+      : matcher1_(matcher1), matcher2_(matcher2) {}
+
+  virtual void DescribeTo(::std::ostream* os) const {
+    *os << "(";
+    matcher1_.DescribeTo(os);
+    *os << ") or (";
+    matcher2_.DescribeTo(os);
+    *os << ")";
+  }
+
+  virtual void DescribeNegationTo(::std::ostream* os) const {
+    *os << "(";
+    matcher1_.DescribeNegationTo(os);
+    *os << ") and (";
+    matcher2_.DescribeNegationTo(os);
+    *os << ")";
+  }
+
+  virtual bool MatchAndExplain(T x, MatchResultListener* listener) const {
+    // If either matcher1_ or matcher2_ matches x, we just need to
+    // explain why *one* of them matches.
+    StringMatchResultListener listener1;
+    if (matcher1_.MatchAndExplain(x, &listener1)) {
+      *listener << listener1.str();
+      return true;
+    }
+
+    StringMatchResultListener listener2;
+    if (matcher2_.MatchAndExplain(x, &listener2)) {
+      *listener << listener2.str();
+      return true;
+    }
+
+    // Otherwise we need to explain why *both* of them fail.
+    const internal::string s1 = listener1.str();
+    const internal::string s2 = listener2.str();
+
+    if (s1 == "") {
+      *listener << s2;
+    } else {
+      *listener << s1;
+      if (s2 != "") {
+        *listener << ", and " << s2;
+      }
+    }
+    return false;
+  }
+
+ private:
+  const Matcher<T> matcher1_;
+  const Matcher<T> matcher2_;
+
+  GTEST_DISALLOW_ASSIGN_(EitherOfMatcherImpl);
+};
+
+#if GTEST_LANG_CXX11
+// AnyOfMatcher is used for the variadic implementation of AnyOf(m_1, m_2, ...).
+template <typename... Args>
+using AnyOfMatcher = VariadicMatcher<EitherOfMatcherImpl, Args...>;
+
+#endif  // GTEST_LANG_CXX11
+
+// Used for implementing the AnyOf(m_1, ..., m_n) matcher, which
+// matches a value that matches at least one of the matchers m_1, ...,
+// and m_n.
+template <typename Matcher1, typename Matcher2>
+class EitherOfMatcher {
+ public:
+  EitherOfMatcher(Matcher1 matcher1, Matcher2 matcher2)
+      : matcher1_(matcher1), matcher2_(matcher2) {}
+
+  // This template type conversion operator allows an
+  // EitherOfMatcher<Matcher1, Matcher2> object to match any type that
+  // both Matcher1 and Matcher2 can match.
+  template <typename T>
+  operator Matcher<T>() const {
+    return Matcher<T>(new EitherOfMatcherImpl<T>(
+        SafeMatcherCast<T>(matcher1_), SafeMatcherCast<T>(matcher2_)));
+  }
+
+ private:
+  Matcher1 matcher1_;
+  Matcher2 matcher2_;
+
+  GTEST_DISALLOW_ASSIGN_(EitherOfMatcher);
+};
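+
+// For example, assuming the user-facing AnyOf(), Eq() and Gt() wrappers
+// provided by Google Mock, a disjunction of matchers might be written as:
+//
+//   EXPECT_THAT(3, AnyOf(Eq(3), Gt(10)));  // Matches via the first branch.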
+
+// Used for implementing Truly(pred), which turns a predicate into a
+// matcher.
+template <typename Predicate>
+class TrulyMatcher {
+ public:
+  explicit TrulyMatcher(Predicate pred) : predicate_(pred) {}
+
+  // This method template allows Truly(pred) to be used as a matcher
+  // for type T where T is the argument type of predicate 'pred'.  The
+  // argument is passed by reference as the predicate may be
+  // interested in the address of the argument.
+  template <typename T>
+  bool MatchAndExplain(T& x,  // NOLINT
+                       MatchResultListener* /* listener */) const {
+    // Without the if-statement, MSVC sometimes warns about converting
+    // a value to bool (warning 4800).
+    //
+    // We cannot write 'return !!predicate_(x);' as that doesn't work
+    // when predicate_(x) returns a class convertible to bool but
+    // having no operator!().
+    if (predicate_(x))
+      return true;
+    return false;
+  }
+
+  void DescribeTo(::std::ostream* os) const {
+    *os << "satisfies the given predicate";
+  }
+
+  void DescribeNegationTo(::std::ostream* os) const {
+    *os << "doesn't satisfy the given predicate";
+  }
+
+ private:
+  Predicate predicate_;
+
+  GTEST_DISALLOW_ASSIGN_(TrulyMatcher);
+};
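+
+// For example, assuming the user-facing Truly() wrapper provided by Google
+// Mock and a hypothetical predicate IsEven():
+//
+//   bool IsEven(int n) { return (n % 2) == 0; }
+//   ...
+//   EXPECT_THAT(4, Truly(IsEven));  // Passes iff IsEven(4) returns true.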
+
+// Used for implementing Matches(matcher), which turns a matcher into
+// a predicate.
+template <typename M>
+class MatcherAsPredicate {
+ public:
+  explicit MatcherAsPredicate(M matcher) : matcher_(matcher) {}
+
+  // This template operator() allows Matches(m) to be used as a
+  // predicate on type T where m is a matcher on type T.
+  //
+  // The argument x is passed by reference instead of by value, as
+  // some matcher may be interested in its address (e.g. as in
+  // Matches(Ref(n))(x)).
+  template <typename T>
+  bool operator()(const T& x) const {
+    // We let matcher_ commit to a particular type here instead of
+    // when the MatcherAsPredicate object was constructed.  This
+    // allows us to write Matches(m) where m is a polymorphic matcher
+    // (e.g. Eq(5)).
+    //
+    // If we write Matcher<T>(matcher_).Matches(x) here, it won't
+    // compile when matcher_ has type Matcher<const T&>; if we write
+    // Matcher<const T&>(matcher_).Matches(x) here, it won't compile
+    // when matcher_ has type Matcher<T>; if we just write
+    // matcher_.Matches(x), it won't compile when matcher_ is
+    // polymorphic, e.g. Eq(5).
+    //
+    // MatcherCast<const T&>() is necessary for making the code work
+    // in all of the above situations.
+    return MatcherCast<const T&>(matcher_).Matches(x);
+  }
+
+ private:
+  M matcher_;
+
+  GTEST_DISALLOW_ASSIGN_(MatcherAsPredicate);
+};
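+
+// For example, assuming the user-facing Matches(), AllOf(), Ge() and Le()
+// wrappers provided by Google Mock, a matcher can be used where a unary
+// predicate is expected:
+//
+//   std::vector<int> v = ...;
+//   std::count_if(v.begin(), v.end(), Matches(AllOf(Ge(0), Le(100))));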
+
+// For implementing ASSERT_THAT() and EXPECT_THAT().  The template
+// argument M must be a type that can be converted to a matcher.
+template <typename M>
+class PredicateFormatterFromMatcher {
+ public:
+  explicit PredicateFormatterFromMatcher(M m) : matcher_(internal::move(m)) {}
+
+  // This template () operator allows a PredicateFormatterFromMatcher
+  // object to act as a predicate-formatter suitable for using with
+  // Google Test's EXPECT_PRED_FORMAT1() macro.
+  template <typename T>
+  AssertionResult operator()(const char* value_text, const T& x) const {
+    // We convert matcher_ to a Matcher<const T&> *now* instead of
+    // when the PredicateFormatterFromMatcher object was constructed,
+    // as matcher_ may be polymorphic (e.g. NotNull()) and we won't
+    // know which type to instantiate it to until we actually see the
+    // type of x here.
+    //
+    // We write SafeMatcherCast<const T&>(matcher_) instead of
+    // Matcher<const T&>(matcher_), as the latter won't compile when
+    // matcher_ has type Matcher<T> (e.g. An<int>()).
+    // We don't write MatcherCast<const T&> either, as that allows
+    // potentially unsafe downcasting of the matcher argument.
+    const Matcher<const T&> matcher = SafeMatcherCast<const T&>(matcher_);
+    StringMatchResultListener listener;
+    if (MatchPrintAndExplain(x, matcher, &listener))
+      return AssertionSuccess();
+
+    ::std::stringstream ss;
+    ss << "Value of: " << value_text << "\n"
+       << "Expected: ";
+    matcher.DescribeTo(&ss);
+    ss << "\n  Actual: " << listener.str();
+    return AssertionFailure() << ss.str();
+  }
+
+ private:
+  const M matcher_;
+
+  GTEST_DISALLOW_ASSIGN_(PredicateFormatterFromMatcher);
+};
+
+// A helper function for converting a matcher to a predicate-formatter
+// without the user needing to explicitly write the type.  This is
+// used for implementing ASSERT_THAT() and EXPECT_THAT().
+// Implementation detail: 'matcher' is received by-value to force decaying.
+template <typename M>
+inline PredicateFormatterFromMatcher<M>
+MakePredicateFormatterFromMatcher(M matcher) {
+  return PredicateFormatterFromMatcher<M>(internal::move(matcher));
+}
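+
+// Sketch of how this is used: the ASSERT_THAT()/EXPECT_THAT() macros
+// (defined near the end of this header) roughly expand to
+//
+//   EXPECT_PRED_FORMAT1(
+//       ::testing::internal::MakePredicateFormatterFromMatcher(matcher),
+//       value)
+//
+// so that operator() above renders the "Value of / Expected / Actual"
+// failure message.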
+
+// Implements the polymorphic floating point equality matcher, which matches
+// two float values using ULP-based approximation or, optionally, a
+// user-specified epsilon.  The template is meant to be instantiated with
+// FloatType being either float or double.
+template <typename FloatType>
+class FloatingEqMatcher {
+ public:
+  // Constructor for FloatingEqMatcher.
+  // The matcher's input will be compared with expected.  The matcher treats two
+  // NANs as equal if nan_eq_nan is true.  Otherwise, under IEEE standards,
+  // equality comparisons between NANs will always return false.  We specify a
+  // negative max_abs_error_ term to indicate that ULP-based approximation will
+  // be used for comparison.
+  FloatingEqMatcher(FloatType expected, bool nan_eq_nan) :
+    expected_(expected), nan_eq_nan_(nan_eq_nan), max_abs_error_(-1) {
+  }
+
+  // Constructor that supports a user-specified max_abs_error that will be used
+  // for comparison instead of ULP-based approximation.  The max absolute
+  // error should be non-negative.
+  FloatingEqMatcher(FloatType expected, bool nan_eq_nan,
+                    FloatType max_abs_error)
+      : expected_(expected),
+        nan_eq_nan_(nan_eq_nan),
+        max_abs_error_(max_abs_error) {
+    GTEST_CHECK_(max_abs_error >= 0)
+        << ", where max_abs_error is" << max_abs_error;
+  }
+
+  // Implements floating point equality matcher as a Matcher<T>.
+  template <typename T>
+  class Impl : public MatcherInterface<T> {
+   public:
+    Impl(FloatType expected, bool nan_eq_nan, FloatType max_abs_error)
+        : expected_(expected),
+          nan_eq_nan_(nan_eq_nan),
+          max_abs_error_(max_abs_error) {}
+
+    virtual bool MatchAndExplain(T value,
+                                 MatchResultListener* listener) const {
+      const FloatingPoint<FloatType> actual(value), expected(expected_);
+
+      // Compares NaNs first, if nan_eq_nan_ is true.
+      if (actual.is_nan() || expected.is_nan()) {
+        if (actual.is_nan() && expected.is_nan()) {
+          return nan_eq_nan_;
+        }
+        // One is nan; the other is not nan.
+        return false;
+      }
+      if (HasMaxAbsError()) {
+        // We perform an equality check so that inf will match inf, regardless
+        // of error bounds.  If value - expected_ would overflow, or if either
+        // value is inf, the difference evaluates to infinity, which should
+        // only match if max_abs_error_ is also infinity.
+        if (value == expected_) {
+          return true;
+        }
+
+        const FloatType diff = value - expected_;
+        if (fabs(diff) <= max_abs_error_) {
+          return true;
+        }
+
+        if (listener->IsInterested()) {
+          *listener << "which is " << diff << " from " << expected_;
+        }
+        return false;
+      } else {
+        return actual.AlmostEquals(expected);
+      }
+    }
+
+    virtual void DescribeTo(::std::ostream* os) const {
+      // os->precision() returns the previously set precision, which we
+      // store to restore the ostream to its original configuration
+      // after outputting.
+      const ::std::streamsize old_precision = os->precision(
+          ::std::numeric_limits<FloatType>::digits10 + 2);
+      if (FloatingPoint<FloatType>(expected_).is_nan()) {
+        if (nan_eq_nan_) {
+          *os << "is NaN";
+        } else {
+          *os << "never matches";
+        }
+      } else {
+        *os << "is approximately " << expected_;
+        if (HasMaxAbsError()) {
+          *os << " (absolute error <= " << max_abs_error_ << ")";
+        }
+      }
+      os->precision(old_precision);
+    }
+
+    virtual void DescribeNegationTo(::std::ostream* os) const {
+      // As before, get original precision.
+      const ::std::streamsize old_precision = os->precision(
+          ::std::numeric_limits<FloatType>::digits10 + 2);
+      if (FloatingPoint<FloatType>(expected_).is_nan()) {
+        if (nan_eq_nan_) {
+          *os << "isn't NaN";
+        } else {
+          *os << "is anything";
+        }
+      } else {
+        *os << "isn't approximately " << expected_;
+        if (HasMaxAbsError()) {
+          *os << " (absolute error > " << max_abs_error_ << ")";
+        }
+      }
+      // Restore original precision.
+      os->precision(old_precision);
+    }
+
+   private:
+    bool HasMaxAbsError() const {
+      return max_abs_error_ >= 0;
+    }
+
+    const FloatType expected_;
+    const bool nan_eq_nan_;
+    // max_abs_error will be used for value comparison when >= 0.
+    const FloatType max_abs_error_;
+
+    GTEST_DISALLOW_ASSIGN_(Impl);
+  };
+
+  // The following 3 type conversion operators allow FloatEq(expected) and
+  // NanSensitiveFloatEq(expected) to be used as a Matcher<float>, a
+  // Matcher<const float&>, or a Matcher<float&>, but nothing else.
+  // (While Google's C++ coding style doesn't allow arguments passed
+  // by non-const reference, we may see them in code not conforming to
+  // the style.  Therefore Google Mock needs to support them.)
+  operator Matcher<FloatType>() const {
+    return MakeMatcher(
+        new Impl<FloatType>(expected_, nan_eq_nan_, max_abs_error_));
+  }
+
+  operator Matcher<const FloatType&>() const {
+    return MakeMatcher(
+        new Impl<const FloatType&>(expected_, nan_eq_nan_, max_abs_error_));
+  }
+
+  operator Matcher<FloatType&>() const {
+    return MakeMatcher(
+        new Impl<FloatType&>(expected_, nan_eq_nan_, max_abs_error_));
+  }
+
+ private:
+  const FloatType expected_;
+  const bool nan_eq_nan_;
+  // max_abs_error will be used for value comparison when >= 0.
+  const FloatType max_abs_error_;
+
+  GTEST_DISALLOW_ASSIGN_(FloatingEqMatcher);
+};
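+
+// For example, assuming the user-facing FloatEq() and DoubleNear() wrappers
+// provided by Google Mock, which are built on this matcher:
+//
+//   EXPECT_THAT(x, FloatEq(2.0f));          // ULP-based comparison.
+//   EXPECT_THAT(y, DoubleNear(2.0, 1e-9));  // User-specified absolute error.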
+
+// Implements the Pointee(m) matcher for matching a pointer whose
+// pointee matches matcher m.  The pointer can be either raw or smart.
+template <typename InnerMatcher>
+class PointeeMatcher {
+ public:
+  explicit PointeeMatcher(const InnerMatcher& matcher) : matcher_(matcher) {}
+
+  // This type conversion operator template allows Pointee(m) to be
+  // used as a matcher for any pointer type whose pointee type is
+  // compatible with the inner matcher, where type Pointer can be
+  // either a raw pointer or a smart pointer.
+  //
+  // The reason we do this instead of relying on
+  // MakePolymorphicMatcher() is that the latter is not flexible
+  // enough for implementing the DescribeTo() method of Pointee().
+  template <typename Pointer>
+  operator Matcher<Pointer>() const {
+    return MakeMatcher(new Impl<Pointer>(matcher_));
+  }
+
+ private:
+  // The monomorphic implementation that works for a particular pointer type.
+  template <typename Pointer>
+  class Impl : public MatcherInterface<Pointer> {
+   public:
+    typedef typename PointeeOf<GTEST_REMOVE_CONST_(  // NOLINT
+        GTEST_REMOVE_REFERENCE_(Pointer))>::type Pointee;
+
+    explicit Impl(const InnerMatcher& matcher)
+        : matcher_(MatcherCast<const Pointee&>(matcher)) {}
+
+    virtual void DescribeTo(::std::ostream* os) const {
+      *os << "points to a value that ";
+      matcher_.DescribeTo(os);
+    }
+
+    virtual void DescribeNegationTo(::std::ostream* os) const {
+      *os << "does not point to a value that ";
+      matcher_.DescribeTo(os);
+    }
+
+    virtual bool MatchAndExplain(Pointer pointer,
+                                 MatchResultListener* listener) const {
+      if (GetRawPointer(pointer) == NULL)
+        return false;
+
+      *listener << "which points to ";
+      return MatchPrintAndExplain(*pointer, matcher_, listener);
+    }
+
+   private:
+    const Matcher<const Pointee&> matcher_;
+
+    GTEST_DISALLOW_ASSIGN_(Impl);
+  };
+
+  const InnerMatcher matcher_;
+
+  GTEST_DISALLOW_ASSIGN_(PointeeMatcher);
+};
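+
+// For example, assuming the user-facing Pointee() and Gt() wrappers
+// provided by Google Mock:
+//
+//   int n = 5;
+//   EXPECT_THAT(&n, Pointee(Gt(0)));  // The pointed-to value is > 0.
+//
+// A smart pointer argument (e.g. a shared_ptr<int>) works the same way.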
+
+// Implements the WhenDynamicCastTo<T>(m) matcher that matches a pointer or
+// reference that matches inner_matcher when dynamic_cast<T> is applied.
+// The result of dynamic_cast<To> is forwarded to the inner matcher.
+// If To is a pointer and the cast fails, the inner matcher will receive NULL.
+// If To is a reference and the cast fails, this matcher returns false
+// immediately.
+template <typename To>
+class WhenDynamicCastToMatcherBase {
+ public:
+  explicit WhenDynamicCastToMatcherBase(const Matcher<To>& matcher)
+      : matcher_(matcher) {}
+
+  void DescribeTo(::std::ostream* os) const {
+    GetCastTypeDescription(os);
+    matcher_.DescribeTo(os);
+  }
+
+  void DescribeNegationTo(::std::ostream* os) const {
+    GetCastTypeDescription(os);
+    matcher_.DescribeNegationTo(os);
+  }
+
+ protected:
+  const Matcher<To> matcher_;
+
+  static string GetToName() {
+#if GTEST_HAS_RTTI
+    return GetTypeName<To>();
+#else  // GTEST_HAS_RTTI
+    return "the target type";
+#endif  // GTEST_HAS_RTTI
+  }
+
+ private:
+  static void GetCastTypeDescription(::std::ostream* os) {
+    *os << "when dynamic_cast to " << GetToName() << ", ";
+  }
+
+  GTEST_DISALLOW_ASSIGN_(WhenDynamicCastToMatcherBase);
+};
+
+// Primary template.
+// To is a pointer. Cast and forward the result.
+template <typename To>
+class WhenDynamicCastToMatcher : public WhenDynamicCastToMatcherBase<To> {
+ public:
+  explicit WhenDynamicCastToMatcher(const Matcher<To>& matcher)
+      : WhenDynamicCastToMatcherBase<To>(matcher) {}
+
+  template <typename From>
+  bool MatchAndExplain(From from, MatchResultListener* listener) const {
+    // TODO(sbenza): Add more detail on failures, e.g. did the dynamic_cast fail?
+    To to = dynamic_cast<To>(from);
+    return MatchPrintAndExplain(to, this->matcher_, listener);
+  }
+};
+
+// Specialize for references.
+// In this case we return false if the dynamic_cast fails.
+template <typename To>
+class WhenDynamicCastToMatcher<To&> : public WhenDynamicCastToMatcherBase<To&> {
+ public:
+  explicit WhenDynamicCastToMatcher(const Matcher<To&>& matcher)
+      : WhenDynamicCastToMatcherBase<To&>(matcher) {}
+
+  template <typename From>
+  bool MatchAndExplain(From& from, MatchResultListener* listener) const {
+    // We don't want an std::bad_cast here, so do the cast with pointers.
+    To* to = dynamic_cast<To*>(&from);
+    if (to == NULL) {
+      *listener << "which cannot be dynamic_cast to " << this->GetToName();
+      return false;
+    }
+    return MatchPrintAndExplain(*to, this->matcher_, listener);
+  }
+};
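+
+// For example, assuming the user-facing WhenDynamicCastTo<T>() and
+// NotNull() wrappers provided by Google Mock, and a hypothetical class
+// Derived that inherits from Base:
+//
+//   Base* base = &derived_object;
+//   EXPECT_THAT(base, WhenDynamicCastTo<Derived*>(NotNull()));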
+
+// Implements the Field() matcher for matching a field (i.e. member
+// variable) of an object.
+template <typename Class, typename FieldType>
+class FieldMatcher {
+ public:
+  FieldMatcher(FieldType Class::*field,
+               const Matcher<const FieldType&>& matcher)
+      : field_(field), matcher_(matcher) {}
+
+  void DescribeTo(::std::ostream* os) const {
+    *os << "is an object whose given field ";
+    matcher_.DescribeTo(os);
+  }
+
+  void DescribeNegationTo(::std::ostream* os) const {
+    *os << "is an object whose given field ";
+    matcher_.DescribeNegationTo(os);
+  }
+
+  template <typename T>
+  bool MatchAndExplain(const T& value, MatchResultListener* listener) const {
+    return MatchAndExplainImpl(
+        typename ::testing::internal::
+            is_pointer<GTEST_REMOVE_CONST_(T)>::type(),
+        value, listener);
+  }
+
+ private:
+  // The first argument of MatchAndExplainImpl() is needed to help
+  // Symbian's C++ compiler choose which overload to use.  Its type is
+  // true_type iff the Field() matcher is used to match a pointer.
+  bool MatchAndExplainImpl(false_type /* is_not_pointer */, const Class& obj,
+                           MatchResultListener* listener) const {
+    *listener << "whose given field is ";
+    return MatchPrintAndExplain(obj.*field_, matcher_, listener);
+  }
+
+  bool MatchAndExplainImpl(true_type /* is_pointer */, const Class* p,
+                           MatchResultListener* listener) const {
+    if (p == NULL)
+      return false;
+
+    *listener << "which points to an object ";
+    // Since *p has a field, it must be a class/struct/union type and
+    // thus cannot be a pointer.  Therefore we pass false_type() as
+    // the first argument.
+    return MatchAndExplainImpl(false_type(), *p, listener);
+  }
+
+  const FieldType Class::*field_;
+  const Matcher<const FieldType&> matcher_;
+
+  GTEST_DISALLOW_ASSIGN_(FieldMatcher);
+};
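+
+// For example, assuming the user-facing Field() and Ge() wrappers provided
+// by Google Mock and a hypothetical struct Point { int x; int y; }:
+//
+//   EXPECT_THAT(point, Field(&Point::x, Ge(0)));
+//
+// A Point* argument is handled by the true_type overload above.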
+
+// Implements the Property() matcher for matching a property
+// (i.e. return value of a getter method) of an object.
+template <typename Class, typename PropertyType>
+class PropertyMatcher {
+ public:
+  // The property may have a reference type, so 'const PropertyType&'
+  // may cause double references and fail to compile.  That's why we
+  // need GTEST_REFERENCE_TO_CONST, which works regardless of
+  // PropertyType being a reference or not.
+  typedef GTEST_REFERENCE_TO_CONST_(PropertyType) RefToConstProperty;
+
+  PropertyMatcher(PropertyType (Class::*property)() const,
+                  const Matcher<RefToConstProperty>& matcher)
+      : property_(property), matcher_(matcher) {}
+
+  void DescribeTo(::std::ostream* os) const {
+    *os << "is an object whose given property ";
+    matcher_.DescribeTo(os);
+  }
+
+  void DescribeNegationTo(::std::ostream* os) const {
+    *os << "is an object whose given property ";
+    matcher_.DescribeNegationTo(os);
+  }
+
+  template <typename T>
+  bool MatchAndExplain(const T& value, MatchResultListener* listener) const {
+    return MatchAndExplainImpl(
+        typename ::testing::internal::
+            is_pointer<GTEST_REMOVE_CONST_(T)>::type(),
+        value, listener);
+  }
+
+ private:
+  // The first argument of MatchAndExplainImpl() is needed to help
+  // Symbian's C++ compiler choose which overload to use.  Its type is
+  // true_type iff the Property() matcher is used to match a pointer.
+  bool MatchAndExplainImpl(false_type /* is_not_pointer */, const Class& obj,
+                           MatchResultListener* listener) const {
+    *listener << "whose given property is ";
+    // Cannot pass the return value (for example, int) to MatchPrintAndExplain,
+    // which takes a non-const reference as argument.
+#if defined(_PREFAST_) && _MSC_VER == 1800
+    // Workaround bug in VC++ 2013's /analyze parser.
+    // https://connect.microsoft.com/VisualStudio/feedback/details/1106363/internal-compiler-error-with-analyze-due-to-failure-to-infer-move
+    posix::Abort();  // To make sure it is never run.
+    return false;
+#else
+    RefToConstProperty result = (obj.*property_)();
+    return MatchPrintAndExplain(result, matcher_, listener);
+#endif
+  }
+
+  bool MatchAndExplainImpl(true_type /* is_pointer */, const Class* p,
+                           MatchResultListener* listener) const {
+    if (p == NULL)
+      return false;
+
+    *listener << "which points to an object ";
+    // Since *p has a property method, it must be a class/struct/union
+    // type and thus cannot be a pointer.  Therefore we pass
+    // false_type() as the first argument.
+    return MatchAndExplainImpl(false_type(), *p, listener);
+  }
+
+  PropertyType (Class::*property_)() const;
+  const Matcher<RefToConstProperty> matcher_;
+
+  GTEST_DISALLOW_ASSIGN_(PropertyMatcher);
+};
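+
+// For example, assuming the user-facing Property() and Gt() wrappers
+// provided by Google Mock and a hypothetical class Foo with a getter
+// 'int size() const':
+//
+//   EXPECT_THAT(foo, Property(&Foo::size, Gt(0)));
+//
+// Note that the getter must be const and take no arguments.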
+
+// Type traits specifying various features of different functors for ResultOf.
+// The default template specifies features for functor objects.
+// Functor classes have to typedef argument_type and result_type
+// to be compatible with ResultOf.
+template <typename Functor>
+struct CallableTraits {
+  typedef typename Functor::result_type ResultType;
+  typedef Functor StorageType;
+
+  static void CheckIsValid(Functor /* functor */) {}
+  template <typename T>
+  static ResultType Invoke(Functor f, T arg) { return f(arg); }
+};
+
+// Specialization for function pointers.
+template <typename ArgType, typename ResType>
+struct CallableTraits<ResType(*)(ArgType)> {
+  typedef ResType ResultType;
+  typedef ResType(*StorageType)(ArgType);
+
+  static void CheckIsValid(ResType(*f)(ArgType)) {
+    GTEST_CHECK_(f != NULL)
+        << "NULL function pointer is passed into ResultOf().";
+  }
+  template <typename T>
+  static ResType Invoke(ResType(*f)(ArgType), T arg) {
+    return (*f)(arg);
+  }
+};
+
+// Implements the ResultOf() matcher for matching a return value of a
+// unary function of an object.
+template <typename Callable>
+class ResultOfMatcher {
+ public:
+  typedef typename CallableTraits<Callable>::ResultType ResultType;
+
+  ResultOfMatcher(Callable callable, const Matcher<ResultType>& matcher)
+      : callable_(callable), matcher_(matcher) {
+    CallableTraits<Callable>::CheckIsValid(callable_);
+  }
+
+  template <typename T>
+  operator Matcher<T>() const {
+    return Matcher<T>(new Impl<T>(callable_, matcher_));
+  }
+
+ private:
+  typedef typename CallableTraits<Callable>::StorageType CallableStorageType;
+
+  template <typename T>
+  class Impl : public MatcherInterface<T> {
+   public:
+    Impl(CallableStorageType callable, const Matcher<ResultType>& matcher)
+        : callable_(callable), matcher_(matcher) {}
+
+    virtual void DescribeTo(::std::ostream* os) const {
+      *os << "is mapped by the given callable to a value that ";
+      matcher_.DescribeTo(os);
+    }
+
+    virtual void DescribeNegationTo(::std::ostream* os) const {
+      *os << "is mapped by the given callable to a value that ";
+      matcher_.DescribeNegationTo(os);
+    }
+
+    virtual bool MatchAndExplain(T obj, MatchResultListener* listener) const {
+      *listener << "which is mapped by the given callable to ";
+      // Cannot pass the return value (for example, int) to
+      // MatchPrintAndExplain, which takes a non-const reference as argument.
+      ResultType result =
+          CallableTraits<Callable>::template Invoke<T>(callable_, obj);
+      return MatchPrintAndExplain(result, matcher_, listener);
+    }
+
+   private:
+    // Functors often define operator() as a non-const method even though
+    // they are actually stateless. But we need to use them even when
+    // 'this' is a const pointer. It's the user's responsibility not to
+    // use stateful callables with ResultOf(), which doesn't guarantee
+    // how many times the callable will be invoked.
+    mutable CallableStorageType callable_;
+    const Matcher<ResultType> matcher_;
+
+    GTEST_DISALLOW_ASSIGN_(Impl);
+  };  // class Impl
+
+  const CallableStorageType callable_;
+  const Matcher<ResultType> matcher_;
+
+  GTEST_DISALLOW_ASSIGN_(ResultOfMatcher);
+};
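+
+// For example, assuming the user-facing ResultOf() and Eq() wrappers
+// provided by Google Mock and a hypothetical free function
+// 'int Square(int n)':
+//
+//   EXPECT_THAT(3, ResultOf(Square, Eq(9)));  // Passes iff Square(3) == 9.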
+
+// Implements a matcher that checks the size of an STL-style container.
+template <typename SizeMatcher>
+class SizeIsMatcher {
+ public:
+  explicit SizeIsMatcher(const SizeMatcher& size_matcher)
+       : size_matcher_(size_matcher) {
+  }
+
+  template <typename Container>
+  operator Matcher<Container>() const {
+    return MakeMatcher(new Impl<Container>(size_matcher_));
+  }
+
+  template <typename Container>
+  class Impl : public MatcherInterface<Container> {
+   public:
+    typedef internal::StlContainerView<
+         GTEST_REMOVE_REFERENCE_AND_CONST_(Container)> ContainerView;
+    typedef typename ContainerView::type::size_type SizeType;
+    explicit Impl(const SizeMatcher& size_matcher)
+        : size_matcher_(MatcherCast<SizeType>(size_matcher)) {}
+
+    virtual void DescribeTo(::std::ostream* os) const {
+      *os << "size ";
+      size_matcher_.DescribeTo(os);
+    }
+    virtual void DescribeNegationTo(::std::ostream* os) const {
+      *os << "size ";
+      size_matcher_.DescribeNegationTo(os);
+    }
+
+    virtual bool MatchAndExplain(Container container,
+                                 MatchResultListener* listener) const {
+      SizeType size = container.size();
+      StringMatchResultListener size_listener;
+      const bool result = size_matcher_.MatchAndExplain(size, &size_listener);
+      *listener
+          << "whose size " << size << (result ? " matches" : " doesn't match");
+      PrintIfNotEmpty(size_listener.str(), listener->stream());
+      return result;
+    }
+
+   private:
+    const Matcher<SizeType> size_matcher_;
+    GTEST_DISALLOW_ASSIGN_(Impl);
+  };
+
+ private:
+  const SizeMatcher size_matcher_;
+  GTEST_DISALLOW_ASSIGN_(SizeIsMatcher);
+};
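+
+// For example, assuming the user-facing SizeIs() and Gt() wrappers
+// provided by Google Mock:
+//
+//   std::vector<int> v(3);
+//   EXPECT_THAT(v, SizeIs(3));       // v.size() == 3.
+//   EXPECT_THAT(v, SizeIs(Gt(1u)));  // v.size() > 1.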
+
+// Implements a matcher that checks the begin()..end() distance of an STL-style
+// container.
+template <typename DistanceMatcher>
+class BeginEndDistanceIsMatcher {
+ public:
+  explicit BeginEndDistanceIsMatcher(const DistanceMatcher& distance_matcher)
+      : distance_matcher_(distance_matcher) {}
+
+  template <typename Container>
+  operator Matcher<Container>() const {
+    return MakeMatcher(new Impl<Container>(distance_matcher_));
+  }
+
+  template <typename Container>
+  class Impl : public MatcherInterface<Container> {
+   public:
+    typedef internal::StlContainerView<
+        GTEST_REMOVE_REFERENCE_AND_CONST_(Container)> ContainerView;
+    typedef typename std::iterator_traits<
+        typename ContainerView::type::const_iterator>::difference_type
+        DistanceType;
+    explicit Impl(const DistanceMatcher& distance_matcher)
+        : distance_matcher_(MatcherCast<DistanceType>(distance_matcher)) {}
+
+    virtual void DescribeTo(::std::ostream* os) const {
+      *os << "distance between begin() and end() ";
+      distance_matcher_.DescribeTo(os);
+    }
+    virtual void DescribeNegationTo(::std::ostream* os) const {
+      *os << "distance between begin() and end() ";
+      distance_matcher_.DescribeNegationTo(os);
+    }
+
+    virtual bool MatchAndExplain(Container container,
+                                 MatchResultListener* listener) const {
+#if GTEST_HAS_STD_BEGIN_AND_END_
+      using std::begin;
+      using std::end;
+      DistanceType distance = std::distance(begin(container), end(container));
+#else
+      DistanceType distance = std::distance(container.begin(), container.end());
+#endif
+      StringMatchResultListener distance_listener;
+      const bool result =
+          distance_matcher_.MatchAndExplain(distance, &distance_listener);
+      *listener << "whose distance between begin() and end() " << distance
+                << (result ? " matches" : " doesn't match");
+      PrintIfNotEmpty(distance_listener.str(), listener->stream());
+      return result;
+    }
+
+   private:
+    const Matcher<DistanceType> distance_matcher_;
+    GTEST_DISALLOW_ASSIGN_(Impl);
+  };
+
+ private:
+  const DistanceMatcher distance_matcher_;
+  GTEST_DISALLOW_ASSIGN_(BeginEndDistanceIsMatcher);
+};
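+
+// For example, assuming the user-facing BeginEndDistanceIs() wrapper
+// provided by Google Mock, this is useful for containers that lack a
+// size() method:
+//
+//   EXPECT_THAT(my_forward_list, BeginEndDistanceIs(2));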
+
+// Implements an equality matcher for any STL-style container whose elements
+// support ==. This matcher is like Eq(), but its failure explanations provide
+// more detailed information that is useful when the container is used as a set.
+// The failure message reports elements that are in one of the operands but not
+// the other. The failure messages do not report duplicate or out-of-order
+// elements in the containers (which don't matter for sets, but can
+// occur if the containers are vectors or lists, for example).
+//
+// Uses the container's const_iterator, value_type, operator ==,
+// begin(), and end().
+template <typename Container>
+class ContainerEqMatcher {
+ public:
+  typedef internal::StlContainerView<Container> View;
+  typedef typename View::type StlContainer;
+  typedef typename View::const_reference StlContainerReference;
+
+  // We make a copy of expected in case the elements in it are modified
+  // after this matcher is created.
+  explicit ContainerEqMatcher(const Container& expected)
+      : expected_(View::Copy(expected)) {
+    // Makes sure the user doesn't instantiate this class template
+    // with a const or reference type.
+    (void)testing::StaticAssertTypeEq<Container,
+        GTEST_REMOVE_REFERENCE_AND_CONST_(Container)>();
+  }
+
+  void DescribeTo(::std::ostream* os) const {
+    *os << "equals ";
+    UniversalPrint(expected_, os);
+  }
+  void DescribeNegationTo(::std::ostream* os) const {
+    *os << "does not equal ";
+    UniversalPrint(expected_, os);
+  }
+
+  template <typename LhsContainer>
+  bool MatchAndExplain(const LhsContainer& lhs,
+                       MatchResultListener* listener) const {
+    // GTEST_REMOVE_CONST_() is needed to work around an MSVC 8.0 bug
+    // that causes LhsContainer to be a const type sometimes.
+    typedef internal::StlContainerView<GTEST_REMOVE_CONST_(LhsContainer)>
+        LhsView;
+    typedef typename LhsView::type LhsStlContainer;
+    StlContainerReference lhs_stl_container = LhsView::ConstReference(lhs);
+    if (lhs_stl_container == expected_)
+      return true;
+
+    ::std::ostream* const os = listener->stream();
+    if (os != NULL) {
+      // Something is different. Check for extra values first.
+      bool printed_header = false;
+      for (typename LhsStlContainer::const_iterator it =
+               lhs_stl_container.begin();
+           it != lhs_stl_container.end(); ++it) {
+        if (internal::ArrayAwareFind(expected_.begin(), expected_.end(), *it) ==
+            expected_.end()) {
+          if (printed_header) {
+            *os << ", ";
+          } else {
+            *os << "which has these unexpected elements: ";
+            printed_header = true;
+          }
+          UniversalPrint(*it, os);
+        }
+      }
+
+      // Now check for missing values.
+      bool printed_header2 = false;
+      for (typename StlContainer::const_iterator it = expected_.begin();
+           it != expected_.end(); ++it) {
+        if (internal::ArrayAwareFind(
+                lhs_stl_container.begin(), lhs_stl_container.end(), *it) ==
+            lhs_stl_container.end()) {
+          if (printed_header2) {
+            *os << ", ";
+          } else {
+            *os << (printed_header ? ",\nand" : "which")
+                << " doesn't have these expected elements: ";
+            printed_header2 = true;
+          }
+          UniversalPrint(*it, os);
+        }
+      }
+    }
+
+    return false;
+  }
+
+ private:
+  const StlContainer expected_;
+
+  GTEST_DISALLOW_ASSIGN_(ContainerEqMatcher);
+};
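+
+// For example, assuming the user-facing ContainerEq() wrapper provided by
+// Google Mock:
+//
+//   std::set<int> expected_ids = ...;
+//   EXPECT_THAT(actual_ids, ContainerEq(expected_ids));
+//
+// On failure, the unexpected and missing elements are listed, as produced
+// by MatchAndExplain() above.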
+
+// A comparator functor that uses the < operator to compare two values.
+struct LessComparator {
+  template <typename T, typename U>
+  bool operator()(const T& lhs, const U& rhs) const { return lhs < rhs; }
+};
+
+// Implements WhenSortedBy(comparator, container_matcher).
+template <typename Comparator, typename ContainerMatcher>
+class WhenSortedByMatcher {
+ public:
+  WhenSortedByMatcher(const Comparator& comparator,
+                      const ContainerMatcher& matcher)
+      : comparator_(comparator), matcher_(matcher) {}
+
+  template <typename LhsContainer>
+  operator Matcher<LhsContainer>() const {
+    return MakeMatcher(new Impl<LhsContainer>(comparator_, matcher_));
+  }
+
+  template <typename LhsContainer>
+  class Impl : public MatcherInterface<LhsContainer> {
+   public:
+    typedef internal::StlContainerView<
+         GTEST_REMOVE_REFERENCE_AND_CONST_(LhsContainer)> LhsView;
+    typedef typename LhsView::type LhsStlContainer;
+    typedef typename LhsView::const_reference LhsStlContainerReference;
+    // Transforms std::pair<const Key, Value> into std::pair<Key, Value>
+    // so that we can match associative containers.
+    typedef typename RemoveConstFromKey<
+        typename LhsStlContainer::value_type>::type LhsValue;
+
+    Impl(const Comparator& comparator, const ContainerMatcher& matcher)
+        : comparator_(comparator), matcher_(matcher) {}
+
+    virtual void DescribeTo(::std::ostream* os) const {
+      *os << "(when sorted) ";
+      matcher_.DescribeTo(os);
+    }
+
+    virtual void DescribeNegationTo(::std::ostream* os) const {
+      *os << "(when sorted) ";
+      matcher_.DescribeNegationTo(os);
+    }
+
+    virtual bool MatchAndExplain(LhsContainer lhs,
+                                 MatchResultListener* listener) const {
+      LhsStlContainerReference lhs_stl_container = LhsView::ConstReference(lhs);
+      ::std::vector<LhsValue> sorted_container(lhs_stl_container.begin(),
+                                               lhs_stl_container.end());
+      ::std::sort(
+           sorted_container.begin(), sorted_container.end(), comparator_);
+
+      if (!listener->IsInterested()) {
+        // If the listener is not interested, we do not need to
+        // construct the inner explanation.
+        return matcher_.Matches(sorted_container);
+      }
+
+      *listener << "which is ";
+      UniversalPrint(sorted_container, listener->stream());
+      *listener << " when sorted";
+
+      StringMatchResultListener inner_listener;
+      const bool match = matcher_.MatchAndExplain(sorted_container,
+                                                  &inner_listener);
+      PrintIfNotEmpty(inner_listener.str(), listener->stream());
+      return match;
+    }
+
+   private:
+    const Comparator comparator_;
+    const Matcher<const ::std::vector<LhsValue>&> matcher_;
+
+    GTEST_DISALLOW_COPY_AND_ASSIGN_(Impl);
+  };
+
+ private:
+  const Comparator comparator_;
+  const ContainerMatcher matcher_;
+
+  GTEST_DISALLOW_ASSIGN_(WhenSortedByMatcher);
+};
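+
+// For example, assuming the user-facing WhenSortedBy() and ElementsAre()
+// wrappers provided by Google Mock:
+//
+//   std::vector<int> v = ...;  // say, {3, 1, 2}
+//   EXPECT_THAT(v, WhenSortedBy(std::greater<int>(), ElementsAre(3, 2, 1)));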
+
+// Implements Pointwise(tuple_matcher, rhs_container).  tuple_matcher
+// must be able to be safely cast to Matcher<tuple<const T1&, const
+// T2&> >, where T1 and T2 are the types of elements in the LHS
+// container and the RHS container respectively.
+template <typename TupleMatcher, typename RhsContainer>
+class PointwiseMatcher {
+ public:
+  typedef internal::StlContainerView<RhsContainer> RhsView;
+  typedef typename RhsView::type RhsStlContainer;
+  typedef typename RhsStlContainer::value_type RhsValue;
+
+  // Like ContainerEq, we make a copy of rhs in case the elements in
+  // it are modified after this matcher is created.
+  PointwiseMatcher(const TupleMatcher& tuple_matcher, const RhsContainer& rhs)
+      : tuple_matcher_(tuple_matcher), rhs_(RhsView::Copy(rhs)) {
+    // Makes sure the user doesn't instantiate this class template
+    // with a const or reference type.
+    (void)testing::StaticAssertTypeEq<RhsContainer,
+        GTEST_REMOVE_REFERENCE_AND_CONST_(RhsContainer)>();
+  }
+
+  template <typename LhsContainer>
+  operator Matcher<LhsContainer>() const {
+    return MakeMatcher(new Impl<LhsContainer>(tuple_matcher_, rhs_));
+  }
+
+  template <typename LhsContainer>
+  class Impl : public MatcherInterface<LhsContainer> {
+   public:
+    typedef internal::StlContainerView<
+         GTEST_REMOVE_REFERENCE_AND_CONST_(LhsContainer)> LhsView;
+    typedef typename LhsView::type LhsStlContainer;
+    typedef typename LhsView::const_reference LhsStlContainerReference;
+    typedef typename LhsStlContainer::value_type LhsValue;
+    // We pass the LHS value and the RHS value to the inner matcher by
+    // reference, as they may be expensive to copy.  We must use tuple
+    // instead of pair here, as a pair cannot hold references (C++ 98,
+    // 20.2.2 [lib.pairs]).
+    typedef ::testing::tuple<const LhsValue&, const RhsValue&> InnerMatcherArg;
+
+    Impl(const TupleMatcher& tuple_matcher, const RhsStlContainer& rhs)
+        // mono_tuple_matcher_ holds a monomorphic version of the tuple matcher.
+        : mono_tuple_matcher_(SafeMatcherCast<InnerMatcherArg>(tuple_matcher)),
+          rhs_(rhs) {}
+
+    virtual void DescribeTo(::std::ostream* os) const {
+      *os << "contains " << rhs_.size()
+          << " values, where each value and its corresponding value in ";
+      UniversalPrinter<RhsStlContainer>::Print(rhs_, os);
+      *os << " ";
+      mono_tuple_matcher_.DescribeTo(os);
+    }
+    virtual void DescribeNegationTo(::std::ostream* os) const {
+      *os << "doesn't contain exactly " << rhs_.size()
+          << " values, or contains a value x at some index i"
+          << " where x and the i-th value of ";
+      UniversalPrint(rhs_, os);
+      *os << " ";
+      mono_tuple_matcher_.DescribeNegationTo(os);
+    }
+
+    virtual bool MatchAndExplain(LhsContainer lhs,
+                                 MatchResultListener* listener) const {
+      LhsStlContainerReference lhs_stl_container = LhsView::ConstReference(lhs);
+      const size_t actual_size = lhs_stl_container.size();
+      if (actual_size != rhs_.size()) {
+        *listener << "which contains " << actual_size << " values";
+        return false;
+      }
+
+      typename LhsStlContainer::const_iterator left = lhs_stl_container.begin();
+      typename RhsStlContainer::const_iterator right = rhs_.begin();
+      for (size_t i = 0; i != actual_size; ++i, ++left, ++right) {
+        const InnerMatcherArg value_pair(*left, *right);
+
+        if (listener->IsInterested()) {
+          StringMatchResultListener inner_listener;
+          if (!mono_tuple_matcher_.MatchAndExplain(
+                  value_pair, &inner_listener)) {
+            *listener << "where the value pair (";
+            UniversalPrint(*left, listener->stream());
+            *listener << ", ";
+            UniversalPrint(*right, listener->stream());
+            *listener << ") at index #" << i << " don't match";
+            PrintIfNotEmpty(inner_listener.str(), listener->stream());
+            return false;
+          }
+        } else {
+          if (!mono_tuple_matcher_.Matches(value_pair))
+            return false;
+        }
+      }
+
+      return true;
+    }
+
+   private:
+    const Matcher<InnerMatcherArg> mono_tuple_matcher_;
+    const RhsStlContainer rhs_;
+
+    GTEST_DISALLOW_ASSIGN_(Impl);
+  };
+
+ private:
+  const TupleMatcher tuple_matcher_;
+  const RhsStlContainer rhs_;
+
+  GTEST_DISALLOW_ASSIGN_(PointwiseMatcher);
+};
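+
+// For example, assuming the user-facing Pointwise() wrapper and the 2-tuple
+// Le() matcher provided by Google Mock:
+//
+//   std::vector<int> lhs = ...;
+//   std::vector<int> rhs = ...;
+//   // Passes iff the containers have equal sizes and lhs[i] <= rhs[i] for
+//   // every index i.
+//   EXPECT_THAT(lhs, Pointwise(Le(), rhs));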
+
+// Holds the logic common to ContainsMatcherImpl and EachMatcherImpl.
+template <typename Container>
+class QuantifierMatcherImpl : public MatcherInterface<Container> {
+ public:
+  typedef GTEST_REMOVE_REFERENCE_AND_CONST_(Container) RawContainer;
+  typedef StlContainerView<RawContainer> View;
+  typedef typename View::type StlContainer;
+  typedef typename View::const_reference StlContainerReference;
+  typedef typename StlContainer::value_type Element;
+
+  template <typename InnerMatcher>
+  explicit QuantifierMatcherImpl(InnerMatcher inner_matcher)
+      : inner_matcher_(
+           testing::SafeMatcherCast<const Element&>(inner_matcher)) {}
+
+  // Checks whether:
+  // * All elements in the container match, if all_elements_should_match.
+  // * Any element in the container matches, if !all_elements_should_match.
+  bool MatchAndExplainImpl(bool all_elements_should_match,
+                           Container container,
+                           MatchResultListener* listener) const {
+    StlContainerReference stl_container = View::ConstReference(container);
+    size_t i = 0;
+    for (typename StlContainer::const_iterator it = stl_container.begin();
+         it != stl_container.end(); ++it, ++i) {
+      StringMatchResultListener inner_listener;
+      const bool matches = inner_matcher_.MatchAndExplain(*it, &inner_listener);
+
+      if (matches != all_elements_should_match) {
+        *listener << "whose element #" << i
+                  << (matches ? " matches" : " doesn't match");
+        PrintIfNotEmpty(inner_listener.str(), listener->stream());
+        return !all_elements_should_match;
+      }
+    }
+    return all_elements_should_match;
+  }
+
+ protected:
+  const Matcher<const Element&> inner_matcher_;
+
+  GTEST_DISALLOW_ASSIGN_(QuantifierMatcherImpl);
+};
+
+// Implements Contains(element_matcher) for the given argument type Container.
+// Symmetric to EachMatcherImpl.
+template <typename Container>
+class ContainsMatcherImpl : public QuantifierMatcherImpl<Container> {
+ public:
+  template <typename InnerMatcher>
+  explicit ContainsMatcherImpl(InnerMatcher inner_matcher)
+      : QuantifierMatcherImpl<Container>(inner_matcher) {}
+
+  // Describes what this matcher does.
+  virtual void DescribeTo(::std::ostream* os) const {
+    *os << "contains at least one element that ";
+    this->inner_matcher_.DescribeTo(os);
+  }
+
+  virtual void DescribeNegationTo(::std::ostream* os) const {
+    *os << "doesn't contain any element that ";
+    this->inner_matcher_.DescribeTo(os);
+  }
+
+  virtual bool MatchAndExplain(Container container,
+                               MatchResultListener* listener) const {
+    return this->MatchAndExplainImpl(false, container, listener);
+  }
+
+ private:
+  GTEST_DISALLOW_ASSIGN_(ContainsMatcherImpl);
+};
+
+// Implements Each(element_matcher) for the given argument type Container.
+// Symmetric to ContainsMatcherImpl.
+template <typename Container>
+class EachMatcherImpl : public QuantifierMatcherImpl<Container> {
+ public:
+  template <typename InnerMatcher>
+  explicit EachMatcherImpl(InnerMatcher inner_matcher)
+      : QuantifierMatcherImpl<Container>(inner_matcher) {}
+
+  // Describes what this matcher does.
+  virtual void DescribeTo(::std::ostream* os) const {
+    *os << "only contains elements that ";
+    this->inner_matcher_.DescribeTo(os);
+  }
+
+  virtual void DescribeNegationTo(::std::ostream* os) const {
+    *os << "contains some element that ";
+    this->inner_matcher_.DescribeNegationTo(os);
+  }
+
+  virtual bool MatchAndExplain(Container container,
+                               MatchResultListener* listener) const {
+    return this->MatchAndExplainImpl(true, container, listener);
+  }
+
+ private:
+  GTEST_DISALLOW_ASSIGN_(EachMatcherImpl);
+};
+
+// Implements polymorphic Contains(element_matcher).
+template <typename M>
+class ContainsMatcher {
+ public:
+  explicit ContainsMatcher(M m) : inner_matcher_(m) {}
+
+  template <typename Container>
+  operator Matcher<Container>() const {
+    return MakeMatcher(new ContainsMatcherImpl<Container>(inner_matcher_));
+  }
+
+ private:
+  const M inner_matcher_;
+
+  GTEST_DISALLOW_ASSIGN_(ContainsMatcher);
+};
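+
+// For example, assuming the user-facing Contains() and Gt() wrappers
+// provided by Google Mock:
+//
+//   std::vector<int> v = ...;
+//   EXPECT_THAT(v, Contains(Gt(10)));  // At least one element is > 10.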
+
+// Implements polymorphic Each(element_matcher).
+template <typename M>
+class EachMatcher {
+ public:
+  explicit EachMatcher(M m) : inner_matcher_(m) {}
+
+  template <typename Container>
+  operator Matcher<Container>() const {
+    return MakeMatcher(new EachMatcherImpl<Container>(inner_matcher_));
+  }
+
+ private:
+  const M inner_matcher_;
+
+  GTEST_DISALLOW_ASSIGN_(EachMatcher);
+};
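+
+// For example, assuming the user-facing Each() and Ge() wrappers provided
+// by Google Mock:
+//
+//   std::vector<int> v = ...;
+//   EXPECT_THAT(v, Each(Ge(0)));  // Every element is >= 0.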
+
+// Implements Key(inner_matcher) for the given argument pair type.
+// Key(inner_matcher) matches an std::pair whose 'first' field matches
+// inner_matcher.  For example, Contains(Key(Ge(5))) can be used to match an
+// std::map that contains at least one element whose key is >= 5.
+template <typename PairType>
+class KeyMatcherImpl : public MatcherInterface<PairType> {
+ public:
+  typedef GTEST_REMOVE_REFERENCE_AND_CONST_(PairType) RawPairType;
+  typedef typename RawPairType::first_type KeyType;
+
+  template <typename InnerMatcher>
+  explicit KeyMatcherImpl(InnerMatcher inner_matcher)
+      : inner_matcher_(
+          testing::SafeMatcherCast<const KeyType&>(inner_matcher)) {
+  }
+
+  // Returns true iff 'key_value.first' (the key) matches the inner matcher.
+  virtual bool MatchAndExplain(PairType key_value,
+                               MatchResultListener* listener) const {
+    StringMatchResultListener inner_listener;
+    const bool match = inner_matcher_.MatchAndExplain(key_value.first,
+                                                      &inner_listener);
+    const internal::string explanation = inner_listener.str();
+    if (explanation != "") {
+      *listener << "whose first field is a value " << explanation;
+    }
+    return match;
+  }
+
+  // Describes what this matcher does.
+  virtual void DescribeTo(::std::ostream* os) const {
+    *os << "has a key that ";
+    inner_matcher_.DescribeTo(os);
+  }
+
+  // Describes what the negation of this matcher does.
+  virtual void DescribeNegationTo(::std::ostream* os) const {
+    *os << "doesn't have a key that ";
+    inner_matcher_.DescribeTo(os);
+  }
+
+ private:
+  const Matcher<const KeyType&> inner_matcher_;
+
+  GTEST_DISALLOW_ASSIGN_(KeyMatcherImpl);
+};
+
+// Implements polymorphic Key(matcher_for_key).
+template <typename M>
+class KeyMatcher {
+ public:
+  explicit KeyMatcher(M m) : matcher_for_key_(m) {}
+
+  template <typename PairType>
+  operator Matcher<PairType>() const {
+    return MakeMatcher(new KeyMatcherImpl<PairType>(matcher_for_key_));
+  }
+
+ private:
+  const M matcher_for_key_;
+
+  GTEST_DISALLOW_ASSIGN_(KeyMatcher);
+};
+
+// Implements Pair(first_matcher, second_matcher) for the given argument pair
+// type with its two matchers. See Pair() function below.
+template <typename PairType>
+class PairMatcherImpl : public MatcherInterface<PairType> {
+ public:
+  typedef GTEST_REMOVE_REFERENCE_AND_CONST_(PairType) RawPairType;
+  typedef typename RawPairType::first_type FirstType;
+  typedef typename RawPairType::second_type SecondType;
+
+  template <typename FirstMatcher, typename SecondMatcher>
+  PairMatcherImpl(FirstMatcher first_matcher, SecondMatcher second_matcher)
+      : first_matcher_(
+            testing::SafeMatcherCast<const FirstType&>(first_matcher)),
+        second_matcher_(
+            testing::SafeMatcherCast<const SecondType&>(second_matcher)) {
+  }
+
+  // Describes what this matcher does.
+  virtual void DescribeTo(::std::ostream* os) const {
+    *os << "has a first field that ";
+    first_matcher_.DescribeTo(os);
+    *os << ", and has a second field that ";
+    second_matcher_.DescribeTo(os);
+  }
+
+  // Describes what the negation of this matcher does.
+  virtual void DescribeNegationTo(::std::ostream* os) const {
+    *os << "has a first field that ";
+    first_matcher_.DescribeNegationTo(os);
+    *os << ", or has a second field that ";
+    second_matcher_.DescribeNegationTo(os);
+  }
+
+  // Returns true iff 'a_pair.first' matches first_matcher and 'a_pair.second'
+  // matches second_matcher.
+  virtual bool MatchAndExplain(PairType a_pair,
+                               MatchResultListener* listener) const {
+    if (!listener->IsInterested()) {
+      // If the listener is not interested, we don't need to construct the
+      // explanation.
+      return first_matcher_.Matches(a_pair.first) &&
+             second_matcher_.Matches(a_pair.second);
+    }
+    StringMatchResultListener first_inner_listener;
+    if (!first_matcher_.MatchAndExplain(a_pair.first,
+                                        &first_inner_listener)) {
+      *listener << "whose first field does not match";
+      PrintIfNotEmpty(first_inner_listener.str(), listener->stream());
+      return false;
+    }
+    StringMatchResultListener second_inner_listener;
+    if (!second_matcher_.MatchAndExplain(a_pair.second,
+                                         &second_inner_listener)) {
+      *listener << "whose second field does not match";
+      PrintIfNotEmpty(second_inner_listener.str(), listener->stream());
+      return false;
+    }
+    ExplainSuccess(first_inner_listener.str(), second_inner_listener.str(),
+                   listener);
+    return true;
+  }
+
+ private:
+  void ExplainSuccess(const internal::string& first_explanation,
+                      const internal::string& second_explanation,
+                      MatchResultListener* listener) const {
+    *listener << "whose both fields match";
+    if (first_explanation != "") {
+      *listener << ", where the first field is a value " << first_explanation;
+    }
+    if (second_explanation != "") {
+      *listener << ", ";
+      if (first_explanation != "") {
+        *listener << "and ";
+      } else {
+        *listener << "where ";
+      }
+      *listener << "the second field is a value " << second_explanation;
+    }
+  }
+
+  const Matcher<const FirstType&> first_matcher_;
+  const Matcher<const SecondType&> second_matcher_;
+
+  GTEST_DISALLOW_ASSIGN_(PairMatcherImpl);
+};
+
+// Implements polymorphic Pair(first_matcher, second_matcher).
+template <typename FirstMatcher, typename SecondMatcher>
+class PairMatcher {
+ public:
+  PairMatcher(FirstMatcher first_matcher, SecondMatcher second_matcher)
+      : first_matcher_(first_matcher), second_matcher_(second_matcher) {}
+
+  template <typename PairType>
+  operator Matcher<PairType> () const {
+    return MakeMatcher(
+        new PairMatcherImpl<PairType>(
+            first_matcher_, second_matcher_));
+  }
+
+ private:
+  const FirstMatcher first_matcher_;
+  const SecondMatcher second_matcher_;
+
+  GTEST_DISALLOW_ASSIGN_(PairMatcher);
+};
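+
+// For example, assuming the user-facing Contains() and Pair() wrappers
+// provided by Google Mock:
+//
+//   std::map<int, std::string> m = ...;
+//   EXPECT_THAT(m, Contains(Pair(5, "five")));  // Some entry maps 5 to "five".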
+
+// Implements ElementsAre() and ElementsAreArray().
+template <typename Container>
+class ElementsAreMatcherImpl : public MatcherInterface<Container> {
+ public:
+  typedef GTEST_REMOVE_REFERENCE_AND_CONST_(Container) RawContainer;
+  typedef internal::StlContainerView<RawContainer> View;
+  typedef typename View::type StlContainer;
+  typedef typename View::const_reference StlContainerReference;
+  typedef typename StlContainer::value_type Element;
+
+  // Constructs the matcher from a sequence of element values or
+  // element matchers.
+  template <typename InputIter>
+  ElementsAreMatcherImpl(InputIter first, InputIter last) {
+    while (first != last) {
+      matchers_.push_back(MatcherCast<const Element&>(*first++));
+    }
+  }
+
+  // Describes what this matcher does.
+  virtual void DescribeTo(::std::ostream* os) const {
+    if (count() == 0) {
+      *os << "is empty";
+    } else if (count() == 1) {
+      *os << "has 1 element that ";
+      matchers_[0].DescribeTo(os);
+    } else {
+      *os << "has " << Elements(count()) << " where\n";
+      for (size_t i = 0; i != count(); ++i) {
+        *os << "element #" << i << " ";
+        matchers_[i].DescribeTo(os);
+        if (i + 1 < count()) {
+          *os << ",\n";
+        }
+      }
+    }
+  }
+
+  // Describes what the negation of this matcher does.
+  virtual void DescribeNegationTo(::std::ostream* os) const {
+    if (count() == 0) {
+      *os << "isn't empty";
+      return;
+    }
+
+    *os << "doesn't have " << Elements(count()) << ", or\n";
+    for (size_t i = 0; i != count(); ++i) {
+      *os << "element #" << i << " ";
+      matchers_[i].DescribeNegationTo(os);
+      if (i + 1 < count()) {
+        *os << ", or\n";
+      }
+    }
+  }
+
+  virtual bool MatchAndExplain(Container container,
+                               MatchResultListener* listener) const {
+    // To work with stream-like "containers", we must only walk
+    // through the elements in one pass.
+
+    const bool listener_interested = listener->IsInterested();
+
+    // explanations[i] is the explanation of the element at index i.
+    ::std::vector<internal::string> explanations(count());
+    StlContainerReference stl_container = View::ConstReference(container);
+    typename StlContainer::const_iterator it = stl_container.begin();
+    size_t exam_pos = 0;
+    bool mismatch_found = false;  // Have we found a mismatched element yet?
+
+    // Go through the elements and matchers in pairs, until we reach
+    // the end of either the elements or the matchers, or until we find a
+    // mismatch.
+    for (; it != stl_container.end() && exam_pos != count(); ++it, ++exam_pos) {
+      bool match;  // Does the current element match the current matcher?
+      if (listener_interested) {
+        StringMatchResultListener s;
+        match = matchers_[exam_pos].MatchAndExplain(*it, &s);
+        explanations[exam_pos] = s.str();
+      } else {
+        match = matchers_[exam_pos].Matches(*it);
+      }
+
+      if (!match) {
+        mismatch_found = true;
+        break;
+      }
+    }
+    // If mismatch_found is true, 'exam_pos' is the index of the mismatch.
+
+    // Find how many elements the actual container has.  We avoid calling
+    // size() so that this code works for stream-like "containers" that
+    // don't define size().
+    size_t actual_count = exam_pos;
+    for (; it != stl_container.end(); ++it) {
+      ++actual_count;
+    }
+
+    if (actual_count != count()) {
+      // The element count doesn't match.  If the container is empty,
+      // there's no need to explain anything as Google Mock already
+      // prints the empty container.  Otherwise we just need to show
+      // how many elements there actually are.
+      if (listener_interested && (actual_count != 0)) {
+        *listener << "which has " << Elements(actual_count);
+      }
+      return false;
+    }
+
+    if (mismatch_found) {
+      // The element count matches, but the exam_pos-th element doesn't match.
+      if (listener_interested) {
+        *listener << "whose element #" << exam_pos << " doesn't match";
+        PrintIfNotEmpty(explanations[exam_pos], listener->stream());
+      }
+      return false;
+    }
+
+    // Every element matches its expectation.  We need to explain why
+    // (the obvious ones can be skipped).
+    if (listener_interested) {
+      bool reason_printed = false;
+      for (size_t i = 0; i != count(); ++i) {
+        const internal::string& s = explanations[i];
+        if (!s.empty()) {
+          if (reason_printed) {
+            *listener << ",\nand ";
+          }
+          *listener << "whose element #" << i << " matches, " << s;
+          reason_printed = true;
+        }
+      }
+    }
+    return true;
+  }
+
+ private:
+  static Message Elements(size_t count) {
+    return Message() << count << (count == 1 ? " element" : " elements");
+  }
+
+  size_t count() const { return matchers_.size(); }
+
+  ::std::vector<Matcher<const Element&> > matchers_;
+
+  GTEST_DISALLOW_ASSIGN_(ElementsAreMatcherImpl);
+};
+
+// Connectivity matrix of (elements X matchers), in element-major order.
+// Initially, there are no edges.
+// Use NextGraph() to iterate over all possible edge configurations.
+// Use Randomize() to generate a random edge configuration.
+class GTEST_API_ MatchMatrix {
+ public:
+  MatchMatrix(size_t num_elements, size_t num_matchers)
+      : num_elements_(num_elements),
+        num_matchers_(num_matchers),
+        matched_(num_elements_* num_matchers_, 0) {
+  }
+
+  size_t LhsSize() const { return num_elements_; }
+  size_t RhsSize() const { return num_matchers_; }
+  bool HasEdge(size_t ilhs, size_t irhs) const {
+    return matched_[SpaceIndex(ilhs, irhs)] == 1;
+  }
+  void SetEdge(size_t ilhs, size_t irhs, bool b) {
+    matched_[SpaceIndex(ilhs, irhs)] = b ? 1 : 0;
+  }
+
+  // Treating the connectivity matrix as a (LhsSize()*RhsSize())-bit number,
+  // adds 1 to that number; returns false if incrementing the graph left it
+  // empty.
+  bool NextGraph();
+
+  void Randomize();
+
+  string DebugString() const;
+
+ private:
+  size_t SpaceIndex(size_t ilhs, size_t irhs) const {
+    return ilhs * num_matchers_ + irhs;
+  }
+
+  size_t num_elements_;
+  size_t num_matchers_;
+
+  // Each element is a char interpreted as bool. They are stored as a
+  // flattened array in lhs-major order; use 'SpaceIndex()' to translate
+  // a (ilhs, irhs) matrix coordinate into an offset.
+  ::std::vector<char> matched_;
+};
+
+typedef ::std::pair<size_t, size_t> ElementMatcherPair;
+typedef ::std::vector<ElementMatcherPair> ElementMatcherPairs;
+
+// Returns a maximum bipartite matching for the specified graph 'g'.
+// The matching is represented as a vector of {element, matcher} pairs.
+GTEST_API_ ElementMatcherPairs
+FindMaxBipartiteMatching(const MatchMatrix& g);
+
+GTEST_API_ bool FindPairing(const MatchMatrix& matrix,
+                            MatchResultListener* listener);
+
+// Untyped base class for implementing UnorderedElementsAre.  By
+// putting logic that's not specific to the element type here, we
+// reduce binary bloat and increase compilation speed.
+class GTEST_API_ UnorderedElementsAreMatcherImplBase {
+ protected:
+  // A vector of matcher describers, one for each element matcher.
+  // Does not own the describers (and thus can be used only when the
+  // element matchers are alive).
+  typedef ::std::vector<const MatcherDescriberInterface*> MatcherDescriberVec;
+
+  // Describes this UnorderedElementsAre matcher.
+  void DescribeToImpl(::std::ostream* os) const;
+
+  // Describes the negation of this UnorderedElementsAre matcher.
+  void DescribeNegationToImpl(::std::ostream* os) const;
+
+  bool VerifyAllElementsAndMatchersAreMatched(
+      const ::std::vector<string>& element_printouts,
+      const MatchMatrix& matrix,
+      MatchResultListener* listener) const;
+
+  MatcherDescriberVec& matcher_describers() {
+    return matcher_describers_;
+  }
+
+  static Message Elements(size_t n) {
+    return Message() << n << " element" << (n == 1 ? "" : "s");
+  }
+
+ private:
+  MatcherDescriberVec matcher_describers_;
+
+  GTEST_DISALLOW_ASSIGN_(UnorderedElementsAreMatcherImplBase);
+};
+
+// Implements unordered ElementsAre and unordered ElementsAreArray.
+template <typename Container>
+class UnorderedElementsAreMatcherImpl
+    : public MatcherInterface<Container>,
+      public UnorderedElementsAreMatcherImplBase {
+ public:
+  typedef GTEST_REMOVE_REFERENCE_AND_CONST_(Container) RawContainer;
+  typedef internal::StlContainerView<RawContainer> View;
+  typedef typename View::type StlContainer;
+  typedef typename View::const_reference StlContainerReference;
+  typedef typename StlContainer::const_iterator StlContainerConstIterator;
+  typedef typename StlContainer::value_type Element;
+
+  // Constructs the matcher from a sequence of element values or
+  // element matchers.
+  template <typename InputIter>
+  UnorderedElementsAreMatcherImpl(InputIter first, InputIter last) {
+    for (; first != last; ++first) {
+      matchers_.push_back(MatcherCast<const Element&>(*first));
+      matcher_describers().push_back(matchers_.back().GetDescriber());
+    }
+  }
+
+  // Describes what this matcher does.
+  virtual void DescribeTo(::std::ostream* os) const {
+    return UnorderedElementsAreMatcherImplBase::DescribeToImpl(os);
+  }
+
+  // Describes what the negation of this matcher does.
+  virtual void DescribeNegationTo(::std::ostream* os) const {
+    return UnorderedElementsAreMatcherImplBase::DescribeNegationToImpl(os);
+  }
+
+  virtual bool MatchAndExplain(Container container,
+                               MatchResultListener* listener) const {
+    StlContainerReference stl_container = View::ConstReference(container);
+    ::std::vector<string> element_printouts;
+    MatchMatrix matrix = AnalyzeElements(stl_container.begin(),
+                                         stl_container.end(),
+                                         &element_printouts,
+                                         listener);
+
+    const size_t actual_count = matrix.LhsSize();
+    if (actual_count == 0 && matchers_.empty()) {
+      return true;
+    }
+    if (actual_count != matchers_.size()) {
+      // The element count doesn't match.  If the container is empty,
+      // there's no need to explain anything as Google Mock already
+      // prints the empty container. Otherwise we just need to show
+      // how many elements there actually are.
+      if (actual_count != 0 && listener->IsInterested()) {
+        *listener << "which has " << Elements(actual_count);
+      }
+      return false;
+    }
+
+    return VerifyAllElementsAndMatchersAreMatched(element_printouts,
+                                                  matrix, listener) &&
+           FindPairing(matrix, listener);
+  }
+
+ private:
+  typedef ::std::vector<Matcher<const Element&> > MatcherVec;
+
+  template <typename ElementIter>
+  MatchMatrix AnalyzeElements(ElementIter elem_first, ElementIter elem_last,
+                              ::std::vector<string>* element_printouts,
+                              MatchResultListener* listener) const {
+    element_printouts->clear();
+    ::std::vector<char> did_match;
+    size_t num_elements = 0;
+    for (; elem_first != elem_last; ++num_elements, ++elem_first) {
+      if (listener->IsInterested()) {
+        element_printouts->push_back(PrintToString(*elem_first));
+      }
+      for (size_t irhs = 0; irhs != matchers_.size(); ++irhs) {
+        did_match.push_back(Matches(matchers_[irhs])(*elem_first));
+      }
+    }
+
+    MatchMatrix matrix(num_elements, matchers_.size());
+    ::std::vector<char>::const_iterator did_match_iter = did_match.begin();
+    for (size_t ilhs = 0; ilhs != num_elements; ++ilhs) {
+      for (size_t irhs = 0; irhs != matchers_.size(); ++irhs) {
+        matrix.SetEdge(ilhs, irhs, *did_match_iter++ != 0);
+      }
+    }
+    return matrix;
+  }
+
+  MatcherVec matchers_;
+
+  GTEST_DISALLOW_ASSIGN_(UnorderedElementsAreMatcherImpl);
+};
+
+// Functor for use in TransformTuple.
+// Performs MatcherCast<Target> on an input argument of any type.
+template <typename Target>
+struct CastAndAppendTransform {
+  template <typename Arg>
+  Matcher<Target> operator()(const Arg& a) const {
+    return MatcherCast<Target>(a);
+  }
+};
+
+// Implements UnorderedElementsAre.
+template <typename MatcherTuple>
+class UnorderedElementsAreMatcher {
+ public:
+  explicit UnorderedElementsAreMatcher(const MatcherTuple& args)
+      : matchers_(args) {}
+
+  template <typename Container>
+  operator Matcher<Container>() const {
+    typedef GTEST_REMOVE_REFERENCE_AND_CONST_(Container) RawContainer;
+    typedef typename internal::StlContainerView<RawContainer>::type View;
+    typedef typename View::value_type Element;
+    typedef ::std::vector<Matcher<const Element&> > MatcherVec;
+    MatcherVec matchers;
+    matchers.reserve(::testing::tuple_size<MatcherTuple>::value);
+    TransformTupleValues(CastAndAppendTransform<const Element&>(), matchers_,
+                         ::std::back_inserter(matchers));
+    return MakeMatcher(new UnorderedElementsAreMatcherImpl<Container>(
+                           matchers.begin(), matchers.end()));
+  }
+
+ private:
+  const MatcherTuple matchers_;
+  GTEST_DISALLOW_ASSIGN_(UnorderedElementsAreMatcher);
+};
+
+// Implements ElementsAre.
+template <typename MatcherTuple>
+class ElementsAreMatcher {
+ public:
+  explicit ElementsAreMatcher(const MatcherTuple& args) : matchers_(args) {}
+
+  template <typename Container>
+  operator Matcher<Container>() const {
+    typedef GTEST_REMOVE_REFERENCE_AND_CONST_(Container) RawContainer;
+    typedef typename internal::StlContainerView<RawContainer>::type View;
+    typedef typename View::value_type Element;
+    typedef ::std::vector<Matcher<const Element&> > MatcherVec;
+    MatcherVec matchers;
+    matchers.reserve(::testing::tuple_size<MatcherTuple>::value);
+    TransformTupleValues(CastAndAppendTransform<const Element&>(), matchers_,
+                         ::std::back_inserter(matchers));
+    return MakeMatcher(new ElementsAreMatcherImpl<Container>(
+                           matchers.begin(), matchers.end()));
+  }
+
+ private:
+  const MatcherTuple matchers_;
+  GTEST_DISALLOW_ASSIGN_(ElementsAreMatcher);
+};
+
+// Implements UnorderedElementsAreArray().
+template <typename T>
+class UnorderedElementsAreArrayMatcher {
+ public:
+  UnorderedElementsAreArrayMatcher() {}
+
+  template <typename Iter>
+  UnorderedElementsAreArrayMatcher(Iter first, Iter last)
+      : matchers_(first, last) {}
+
+  template <typename Container>
+  operator Matcher<Container>() const {
+    return MakeMatcher(
+        new UnorderedElementsAreMatcherImpl<Container>(matchers_.begin(),
+                                                       matchers_.end()));
+  }
+
+ private:
+  ::std::vector<T> matchers_;
+
+  GTEST_DISALLOW_ASSIGN_(UnorderedElementsAreArrayMatcher);
+};
+
+// Implements ElementsAreArray().
+template <typename T>
+class ElementsAreArrayMatcher {
+ public:
+  template <typename Iter>
+  ElementsAreArrayMatcher(Iter first, Iter last) : matchers_(first, last) {}
+
+  template <typename Container>
+  operator Matcher<Container>() const {
+    return MakeMatcher(new ElementsAreMatcherImpl<Container>(
+        matchers_.begin(), matchers_.end()));
+  }
+
+ private:
+  const ::std::vector<T> matchers_;
+
+  GTEST_DISALLOW_ASSIGN_(ElementsAreArrayMatcher);
+};
+
+// Given a 2-tuple matcher tm of type Tuple2Matcher and a value second
+// of type Second, BoundSecondMatcher<Tuple2Matcher, Second>(tm,
+// second) is a polymorphic matcher that matches a value x iff tm
+// matches tuple (x, second).  Useful for implementing
+// UnorderedPointwise() in terms of UnorderedElementsAreArray().
+//
+// BoundSecondMatcher is copyable and assignable, as we need to put
+// instances of this class in a vector when implementing
+// UnorderedPointwise().
+template <typename Tuple2Matcher, typename Second>
+class BoundSecondMatcher {
+ public:
+  BoundSecondMatcher(const Tuple2Matcher& tm, const Second& second)
+      : tuple2_matcher_(tm), second_value_(second) {}
+
+  template <typename T>
+  operator Matcher<T>() const {
+    return MakeMatcher(new Impl<T>(tuple2_matcher_, second_value_));
+  }
+
+  // We have to define this for UnorderedPointwise() to compile in
+  // C++98 mode, as it puts BoundSecondMatcher instances in a vector,
+  // which requires the elements to be assignable in C++98.  The
+  // compiler cannot generate the operator= for us, as Tuple2Matcher
+  // and Second may not be assignable.
+  //
+  // However, this should never be called, so the implementation only
+  // needs to assert.
+  void operator=(const BoundSecondMatcher& /*rhs*/) {
+    GTEST_LOG_(FATAL) << "BoundSecondMatcher should never be assigned.";
+  }
+
+ private:
+  template <typename T>
+  class Impl : public MatcherInterface<T> {
+   public:
+    typedef ::testing::tuple<T, Second> ArgTuple;
+
+    Impl(const Tuple2Matcher& tm, const Second& second)
+        : mono_tuple2_matcher_(SafeMatcherCast<const ArgTuple&>(tm)),
+          second_value_(second) {}
+
+    virtual void DescribeTo(::std::ostream* os) const {
+      *os << "and ";
+      UniversalPrint(second_value_, os);
+      *os << " ";
+      mono_tuple2_matcher_.DescribeTo(os);
+    }
+
+    virtual bool MatchAndExplain(T x, MatchResultListener* listener) const {
+      return mono_tuple2_matcher_.MatchAndExplain(ArgTuple(x, second_value_),
+                                                  listener);
+    }
+
+   private:
+    const Matcher<const ArgTuple&> mono_tuple2_matcher_;
+    const Second second_value_;
+
+    GTEST_DISALLOW_ASSIGN_(Impl);
+  };
+
+  const Tuple2Matcher tuple2_matcher_;
+  const Second second_value_;
+};
+
+// Given a 2-tuple matcher tm and a value second,
+// MatcherBindSecond(tm, second) returns a matcher that matches a
+// value x iff tm matches tuple (x, second).  Useful for implementing
+// UnorderedPointwise() in terms of UnorderedElementsAreArray().
+template <typename Tuple2Matcher, typename Second>
+BoundSecondMatcher<Tuple2Matcher, Second> MatcherBindSecond(
+    const Tuple2Matcher& tm, const Second& second) {
+  return BoundSecondMatcher<Tuple2Matcher, Second>(tm, second);
+}
+
+// Returns the description for a matcher defined using the MATCHER*()
+// macro where the user-supplied description string is "", if
+// 'negation' is false; otherwise returns the description of the
+// negation of the matcher.  'param_values' contains a list of strings
+// that are the print-out of the matcher's parameters.
+GTEST_API_ string FormatMatcherDescription(bool negation,
+                                           const char* matcher_name,
+                                           const Strings& param_values);
+
+}  // namespace internal
+
+// ElementsAreArray(first, last)
+// ElementsAreArray(pointer, count)
+// ElementsAreArray(array)
+// ElementsAreArray(container)
+// ElementsAreArray({ e1, e2, ..., en })
+//
+// The ElementsAreArray() functions are like ElementsAre(...), except
+// that they are given a homogeneous sequence rather than taking each
+// element as a function argument. The sequence can be specified as an
+// array, a pointer and count, a vector, an initializer list, or an
+// STL iterator range. In each of these cases, the underlying sequence
+// can be either a sequence of values or a sequence of matchers.
+//
+// All forms of ElementsAreArray() make a copy of the input matcher sequence.
+
+template <typename Iter>
+inline internal::ElementsAreArrayMatcher<
+    typename ::std::iterator_traits<Iter>::value_type>
+ElementsAreArray(Iter first, Iter last) {
+  typedef typename ::std::iterator_traits<Iter>::value_type T;
+  return internal::ElementsAreArrayMatcher<T>(first, last);
+}
+
+template <typename T>
+inline internal::ElementsAreArrayMatcher<T> ElementsAreArray(
+    const T* pointer, size_t count) {
+  return ElementsAreArray(pointer, pointer + count);
+}
+
+template <typename T, size_t N>
+inline internal::ElementsAreArrayMatcher<T> ElementsAreArray(
+    const T (&array)[N]) {
+  return ElementsAreArray(array, N);
+}
+
+template <typename Container>
+inline internal::ElementsAreArrayMatcher<typename Container::value_type>
+ElementsAreArray(const Container& container) {
+  return ElementsAreArray(container.begin(), container.end());
+}
+
+#if GTEST_HAS_STD_INITIALIZER_LIST_
+template <typename T>
+inline internal::ElementsAreArrayMatcher<T>
+ElementsAreArray(::std::initializer_list<T> xs) {
+  return ElementsAreArray(xs.begin(), xs.end());
+}
+#endif
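+
+// Illustrative usage sketch ('values' and 'expected' are hypothetical):
+//   ::std::vector<int> values;
+//   values.push_back(1);
+//   values.push_back(2);
+//   const int expected[] = { 1, 2 };
+//   EXPECT_THAT(values, ElementsAreArray(expected));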
+
+// UnorderedElementsAreArray(first, last)
+// UnorderedElementsAreArray(pointer, count)
+// UnorderedElementsAreArray(array)
+// UnorderedElementsAreArray(container)
+// UnorderedElementsAreArray({ e1, e2, ..., en })
+//
+// The UnorderedElementsAreArray() functions are like
+// ElementsAreArray(...), but allow matching the elements in any order.
+template <typename Iter>
+inline internal::UnorderedElementsAreArrayMatcher<
+    typename ::std::iterator_traits<Iter>::value_type>
+UnorderedElementsAreArray(Iter first, Iter last) {
+  typedef typename ::std::iterator_traits<Iter>::value_type T;
+  return internal::UnorderedElementsAreArrayMatcher<T>(first, last);
+}
+
+template <typename T>
+inline internal::UnorderedElementsAreArrayMatcher<T>
+UnorderedElementsAreArray(const T* pointer, size_t count) {
+  return UnorderedElementsAreArray(pointer, pointer + count);
+}
+
+template <typename T, size_t N>
+inline internal::UnorderedElementsAreArrayMatcher<T>
+UnorderedElementsAreArray(const T (&array)[N]) {
+  return UnorderedElementsAreArray(array, N);
+}
+
+template <typename Container>
+inline internal::UnorderedElementsAreArrayMatcher<
+    typename Container::value_type>
+UnorderedElementsAreArray(const Container& container) {
+  return UnorderedElementsAreArray(container.begin(), container.end());
+}
+
+#if GTEST_HAS_STD_INITIALIZER_LIST_
+template <typename T>
+inline internal::UnorderedElementsAreArrayMatcher<T>
+UnorderedElementsAreArray(::std::initializer_list<T> xs) {
+  return UnorderedElementsAreArray(xs.begin(), xs.end());
+}
+#endif
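+
+// Illustrative usage sketch ('ids' is a hypothetical vector holding 2 and 1):
+//   const int expected[] = { 1, 2 };
+//   EXPECT_THAT(ids, UnorderedElementsAreArray(expected));  // Order ignored.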
+
+// _ is a matcher that matches anything of any type.
+//
+// This definition is fine as:
+//
+//   1. The C++ standard permits using the name _ in a namespace that
+//      is not the global namespace or ::std.
+//   2. The AnythingMatcher class has no data member or constructor,
+//      so it's OK to create global variables of this type.
+//   3. The C++ style arbiters (c-style) have approved of using _ in this case.
+const internal::AnythingMatcher _ = {};
+
+// Creates a matcher that matches any value of the given type T.
+template <typename T>
+inline Matcher<T> A() { return MakeMatcher(new internal::AnyMatcherImpl<T>()); }
+
+// Creates a matcher that matches any value of the given type T.
+template <typename T>
+inline Matcher<T> An() { return A<T>(); }
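+
+// Illustrative usage sketch ('mock' and its method Bar(int) are hypothetical):
+//   EXPECT_CALL(mock, Bar(An<int>()));  // Expects Bar() called with any int.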
+
+// Creates a polymorphic matcher that matches anything equal to x.
+// Note: if the parameter of Eq() were declared as const T&, Eq("foo")
+// wouldn't compile.
+template <typename T>
+inline internal::EqMatcher<T> Eq(T x) { return internal::EqMatcher<T>(x); }
+
+// Constructs a Matcher<T> from a 'value' of type T.  The constructed
+// matcher matches any value that's equal to 'value'.
+template <typename T>
+Matcher<T>::Matcher(T value) { *this = Eq(value); }
+
+// Creates a monomorphic matcher that matches anything with type Lhs
+// and equal to rhs.  A user may need to use this instead of Eq(...)
+// in order to resolve an overloading ambiguity.
+//
+// TypedEq<T>(x) is just a convenient short-hand for Matcher<T>(Eq(x))
+// or Matcher<T>(x), but more readable than the latter.
+//
+// We could define similar monomorphic matchers for other comparison
+// operations (e.g. TypedLt, TypedGe, and etc), but decided not to do
+// it yet as those are used much less than Eq() in practice.  A user
+// can always write Matcher<T>(Lt(5)) to be explicit about the type,
+// for example.
+template <typename Lhs, typename Rhs>
+inline Matcher<Lhs> TypedEq(const Rhs& rhs) { return Eq(rhs); }
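+
+// Illustrative usage sketch ('mock' is hypothetical and Bar is overloaded for
+// int and long, so a bare Eq(5) would be ambiguous):
+//   EXPECT_CALL(mock, Bar(TypedEq<int>(5)));  // Selects the Bar(int) overload.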
+
+// Creates a polymorphic matcher that matches anything >= x.
+template <typename Rhs>
+inline internal::GeMatcher<Rhs> Ge(Rhs x) {
+  return internal::GeMatcher<Rhs>(x);
+}
+
+// Creates a polymorphic matcher that matches anything > x.
+template <typename Rhs>
+inline internal::GtMatcher<Rhs> Gt(Rhs x) {
+  return internal::GtMatcher<Rhs>(x);
+}
+
+// Creates a polymorphic matcher that matches anything <= x.
+template <typename Rhs>
+inline internal::LeMatcher<Rhs> Le(Rhs x) {
+  return internal::LeMatcher<Rhs>(x);
+}
+
+// Creates a polymorphic matcher that matches anything < x.
+template <typename Rhs>
+inline internal::LtMatcher<Rhs> Lt(Rhs x) {
+  return internal::LtMatcher<Rhs>(x);
+}
+
+// Creates a polymorphic matcher that matches anything != x.
+template <typename Rhs>
+inline internal::NeMatcher<Rhs> Ne(Rhs x) {
+  return internal::NeMatcher<Rhs>(x);
+}
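+
+// Illustrative usage sketch of the comparison matchers ('value' is a
+// hypothetical int):
+//   int value = 7;
+//   EXPECT_THAT(value, Gt(5));
+//   EXPECT_THAT(value, Ne(0));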
+
+// Creates a polymorphic matcher that matches any NULL pointer.
+inline PolymorphicMatcher<internal::IsNullMatcher > IsNull() {
+  return MakePolymorphicMatcher(internal::IsNullMatcher());
+}
+
+// Creates a polymorphic matcher that matches any non-NULL pointer.
+// This is convenient as Not(NULL) doesn't compile (the compiler
+// thinks that that expression is comparing a pointer with an integer).
+inline PolymorphicMatcher<internal::NotNullMatcher > NotNull() {
+  return MakePolymorphicMatcher(internal::NotNullMatcher());
+}
+
+// Creates a polymorphic matcher that matches any argument that
+// references variable x.
+template <typename T>
+inline internal::RefMatcher<T&> Ref(T& x) {  // NOLINT
+  return internal::RefMatcher<T&>(x);
+}
+
+// Creates a matcher that matches any double argument approximately
+// equal to rhs, where two NANs are considered unequal.
+inline internal::FloatingEqMatcher<double> DoubleEq(double rhs) {
+  return internal::FloatingEqMatcher<double>(rhs, false);
+}
+
+// Creates a matcher that matches any double argument approximately
+// equal to rhs, including NaN values when rhs is NaN.
+inline internal::FloatingEqMatcher<double> NanSensitiveDoubleEq(double rhs) {
+  return internal::FloatingEqMatcher<double>(rhs, true);
+}
+
+// Creates a matcher that matches any double argument approximately equal to
+// rhs, up to the specified max absolute error bound, where two NANs are
+// considered unequal.  The max absolute error bound must be non-negative.
+inline internal::FloatingEqMatcher<double> DoubleNear(
+    double rhs, double max_abs_error) {
+  return internal::FloatingEqMatcher<double>(rhs, false, max_abs_error);
+}
+
+// Creates a matcher that matches any double argument approximately equal to
+// rhs, up to the specified max absolute error bound, including NaN values when
+// rhs is NaN.  The max absolute error bound must be non-negative.
+inline internal::FloatingEqMatcher<double> NanSensitiveDoubleNear(
+    double rhs, double max_abs_error) {
+  return internal::FloatingEqMatcher<double>(rhs, true, max_abs_error);
+}
+
+// Creates a matcher that matches any float argument approximately
+// equal to rhs, where two NANs are considered unequal.
+inline internal::FloatingEqMatcher<float> FloatEq(float rhs) {
+  return internal::FloatingEqMatcher<float>(rhs, false);
+}
+
+// Creates a matcher that matches any float argument approximately
+// equal to rhs, including NaN values when rhs is NaN.
+inline internal::FloatingEqMatcher<float> NanSensitiveFloatEq(float rhs) {
+  return internal::FloatingEqMatcher<float>(rhs, true);
+}
+
+// Creates a matcher that matches any float argument approximately equal to
+// rhs, up to the specified max absolute error bound, where two NANs are
+// considered unequal.  The max absolute error bound must be non-negative.
+inline internal::FloatingEqMatcher<float> FloatNear(
+    float rhs, float max_abs_error) {
+  return internal::FloatingEqMatcher<float>(rhs, false, max_abs_error);
+}
+
+// Creates a matcher that matches any float argument approximately equal to
+// rhs, up to the specified max absolute error bound, including NaN values when
+// rhs is NaN.  The max absolute error bound must be non-negative.
+inline internal::FloatingEqMatcher<float> NanSensitiveFloatNear(
+    float rhs, float max_abs_error) {
+  return internal::FloatingEqMatcher<float>(rhs, true, max_abs_error);
+}
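+
+// Illustrative usage sketch ('pi_estimate' is a hypothetical double):
+//   double pi_estimate = 3.1416;
+//   EXPECT_THAT(pi_estimate, DoubleNear(3.14159, 1e-3));
+//   EXPECT_THAT(2.0, DoubleEq(2.0));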
+
+// Creates a matcher that matches a pointer (raw or smart) that points
+// to a value that matches inner_matcher.
+template <typename InnerMatcher>
+inline internal::PointeeMatcher<InnerMatcher> Pointee(
+    const InnerMatcher& inner_matcher) {
+  return internal::PointeeMatcher<InnerMatcher>(inner_matcher);
+}
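+
+// Illustrative usage sketch ('n' and 'p' are hypothetical):
+//   int n = 5;
+//   const int* p = &n;
+//   EXPECT_THAT(p, Pointee(Ge(5)));  // *p is >= 5.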
+
+// Creates a matcher that matches a pointer or reference that matches
+// inner_matcher when dynamic_cast<To> is applied.
+// The result of dynamic_cast<To> is forwarded to the inner matcher.
+// If To is a pointer and the cast fails, the inner matcher will receive NULL.
+// If To is a reference and the cast fails, this matcher returns false
+// immediately.
+template <typename To>
+inline PolymorphicMatcher<internal::WhenDynamicCastToMatcher<To> >
+WhenDynamicCastTo(const Matcher<To>& inner_matcher) {
+  return MakePolymorphicMatcher(
+      internal::WhenDynamicCastToMatcher<To>(inner_matcher));
+}
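+
+// Illustrative usage sketch (Base/Derived are hypothetical polymorphic types
+// and 'base_ptr' actually points to a Derived):
+//   Derived derived;
+//   Base* base_ptr = &derived;
+//   EXPECT_THAT(base_ptr, WhenDynamicCastTo<Derived*>(NotNull()));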
+
+// Creates a matcher that matches an object whose given field matches
+// 'matcher'.  For example,
+//   Field(&Foo::number, Ge(5))
+// matches a Foo object x iff x.number >= 5.
+template <typename Class, typename FieldType, typename FieldMatcher>
+inline PolymorphicMatcher<
+  internal::FieldMatcher<Class, FieldType> > Field(
+    FieldType Class::*field, const FieldMatcher& matcher) {
+  return MakePolymorphicMatcher(
+      internal::FieldMatcher<Class, FieldType>(
+          field, MatcherCast<const FieldType&>(matcher)));
+  // The call to MatcherCast() is required for supporting inner
+  // matchers of compatible types.  For example, it allows
+  //   Field(&Foo::bar, m)
+  // to compile where bar is an int32 and m is a matcher for int64.
+}
+
+// Creates a matcher that matches an object whose given property
+// matches 'matcher'.  For example,
+//   Property(&Foo::str, StartsWith("hi"))
+// matches a Foo object x iff x.str() starts with "hi".
+template <typename Class, typename PropertyType, typename PropertyMatcher>
+inline PolymorphicMatcher<
+  internal::PropertyMatcher<Class, PropertyType> > Property(
+    PropertyType (Class::*property)() const, const PropertyMatcher& matcher) {
+  return MakePolymorphicMatcher(
+      internal::PropertyMatcher<Class, PropertyType>(
+          property,
+          MatcherCast<GTEST_REFERENCE_TO_CONST_(PropertyType)>(matcher)));
+  // The call to MatcherCast() is required for supporting inner
+  // matchers of compatible types.  For example, it allows
+  //   Property(&Foo::bar, m)
+  // to compile where bar() returns an int32 and m is a matcher for int64.
+}
+
+// Creates a matcher that matches an object iff the result of applying
+// a callable to x matches 'matcher'.
+// For example,
+//   ResultOf(f, StartsWith("hi"))
+// matches a Foo object x iff f(x) starts with "hi".
+// The callable parameter can be a function, a function pointer, or a functor.
+// Callable has to satisfy the following conditions:
+//   * It must keep no state that affects the results of calls on it, and
+//     it must make no assumptions about how many calls will be made.  Any
+//     state it keeps must be protected from concurrent access.
+//   * If it is a function object, it has to define type result_type.
+//     We recommend deriving your functor classes from std::unary_function.
+template <typename Callable, typename ResultOfMatcher>
+internal::ResultOfMatcher<Callable> ResultOf(
+    Callable callable, const ResultOfMatcher& matcher) {
+  return internal::ResultOfMatcher<Callable>(
+          callable,
+          MatcherCast<typename internal::CallableTraits<Callable>::ResultType>(
+              matcher));
+  // The call to MatcherCast() is required for supporting inner
+  // matchers of compatible types.  For example, it allows
+  //   ResultOf(Function, m)
+  // to compile where Function() returns an int32 and m is a matcher for int64.
+}
+
+// String matchers.
+
+// Matches a string equal to str.
+inline PolymorphicMatcher<internal::StrEqualityMatcher<internal::string> >
+    StrEq(const internal::string& str) {
+  return MakePolymorphicMatcher(internal::StrEqualityMatcher<internal::string>(
+      str, true, true));
+}
+
+// Matches a string not equal to str.
+inline PolymorphicMatcher<internal::StrEqualityMatcher<internal::string> >
+    StrNe(const internal::string& str) {
+  return MakePolymorphicMatcher(internal::StrEqualityMatcher<internal::string>(
+      str, false, true));
+}
+
+// Matches a string equal to str, ignoring case.
+inline PolymorphicMatcher<internal::StrEqualityMatcher<internal::string> >
+    StrCaseEq(const internal::string& str) {
+  return MakePolymorphicMatcher(internal::StrEqualityMatcher<internal::string>(
+      str, true, false));
+}
+
+// Matches a string not equal to str, ignoring case.
+inline PolymorphicMatcher<internal::StrEqualityMatcher<internal::string> >
+    StrCaseNe(const internal::string& str) {
+  return MakePolymorphicMatcher(internal::StrEqualityMatcher<internal::string>(
+      str, false, false));
+}
+
+// Creates a matcher that matches any string, std::string, or C string
+// that contains the given substring.
+inline PolymorphicMatcher<internal::HasSubstrMatcher<internal::string> >
+    HasSubstr(const internal::string& substring) {
+  return MakePolymorphicMatcher(internal::HasSubstrMatcher<internal::string>(
+      substring));
+}
+
+// Matches a string that starts with 'prefix' (case-sensitive).
+inline PolymorphicMatcher<internal::StartsWithMatcher<internal::string> >
+    StartsWith(const internal::string& prefix) {
+  return MakePolymorphicMatcher(internal::StartsWithMatcher<internal::string>(
+      prefix));
+}
+
+// Matches a string that ends with 'suffix' (case-sensitive).
+inline PolymorphicMatcher<internal::EndsWithMatcher<internal::string> >
+    EndsWith(const internal::string& suffix) {
+  return MakePolymorphicMatcher(internal::EndsWithMatcher<internal::string>(
+      suffix));
+}
+
+// Matches a string that fully matches regular expression 'regex'.
+// The matcher takes ownership of 'regex'.
+inline PolymorphicMatcher<internal::MatchesRegexMatcher> MatchesRegex(
+    const internal::RE* regex) {
+  return MakePolymorphicMatcher(internal::MatchesRegexMatcher(regex, true));
+}
+inline PolymorphicMatcher<internal::MatchesRegexMatcher> MatchesRegex(
+    const internal::string& regex) {
+  return MatchesRegex(new internal::RE(regex));
+}
+
+// Matches a string that contains regular expression 'regex'.
+// The matcher takes ownership of 'regex'.
+inline PolymorphicMatcher<internal::MatchesRegexMatcher> ContainsRegex(
+    const internal::RE* regex) {
+  return MakePolymorphicMatcher(internal::MatchesRegexMatcher(regex, false));
+}
+inline PolymorphicMatcher<internal::MatchesRegexMatcher> ContainsRegex(
+    const internal::string& regex) {
+  return ContainsRegex(new internal::RE(regex));
+}
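+
+// Illustrative usage sketch of the string matchers ('greeting' is a
+// hypothetical string):
+//   std::string greeting = "hello world";
+//   EXPECT_THAT(greeting, StartsWith("hello"));
+//   EXPECT_THAT(greeting, HasSubstr("lo wo"));
+//   EXPECT_THAT(greeting, MatchesRegex("hello.*"));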
+
+#if GTEST_HAS_GLOBAL_WSTRING || GTEST_HAS_STD_WSTRING
+// Wide string matchers.
+
+// Matches a string equal to str.
+inline PolymorphicMatcher<internal::StrEqualityMatcher<internal::wstring> >
+    StrEq(const internal::wstring& str) {
+  return MakePolymorphicMatcher(internal::StrEqualityMatcher<internal::wstring>(
+      str, true, true));
+}
+
+// Matches a string not equal to str.
+inline PolymorphicMatcher<internal::StrEqualityMatcher<internal::wstring> >
+    StrNe(const internal::wstring& str) {
+  return MakePolymorphicMatcher(internal::StrEqualityMatcher<internal::wstring>(
+      str, false, true));
+}
+
+// Matches a string equal to str, ignoring case.
+inline PolymorphicMatcher<internal::StrEqualityMatcher<internal::wstring> >
+    StrCaseEq(const internal::wstring& str) {
+  return MakePolymorphicMatcher(internal::StrEqualityMatcher<internal::wstring>(
+      str, true, false));
+}
+
+// Matches a string not equal to str, ignoring case.
+inline PolymorphicMatcher<internal::StrEqualityMatcher<internal::wstring> >
+    StrCaseNe(const internal::wstring& str) {
+  return MakePolymorphicMatcher(internal::StrEqualityMatcher<internal::wstring>(
+      str, false, false));
+}
+
+// Creates a matcher that matches any wstring, std::wstring, or C wide string
+// that contains the given substring.
+inline PolymorphicMatcher<internal::HasSubstrMatcher<internal::wstring> >
+    HasSubstr(const internal::wstring& substring) {
+  return MakePolymorphicMatcher(internal::HasSubstrMatcher<internal::wstring>(
+      substring));
+}
+
+// Matches a string that starts with 'prefix' (case-sensitive).
+inline PolymorphicMatcher<internal::StartsWithMatcher<internal::wstring> >
+    StartsWith(const internal::wstring& prefix) {
+  return MakePolymorphicMatcher(internal::StartsWithMatcher<internal::wstring>(
+      prefix));
+}
+
+// Matches a string that ends with 'suffix' (case-sensitive).
+inline PolymorphicMatcher<internal::EndsWithMatcher<internal::wstring> >
+    EndsWith(const internal::wstring& suffix) {
+  return MakePolymorphicMatcher(internal::EndsWithMatcher<internal::wstring>(
+      suffix));
+}
+
+#endif  // GTEST_HAS_GLOBAL_WSTRING || GTEST_HAS_STD_WSTRING
+
+// Creates a polymorphic matcher that matches a 2-tuple where the
+// first field == the second field.
+inline internal::Eq2Matcher Eq() { return internal::Eq2Matcher(); }
+
+// Creates a polymorphic matcher that matches a 2-tuple where the
+// first field >= the second field.
+inline internal::Ge2Matcher Ge() { return internal::Ge2Matcher(); }
+
+// Creates a polymorphic matcher that matches a 2-tuple where the
+// first field > the second field.
+inline internal::Gt2Matcher Gt() { return internal::Gt2Matcher(); }
+
+// Creates a polymorphic matcher that matches a 2-tuple where the
+// first field <= the second field.
+inline internal::Le2Matcher Le() { return internal::Le2Matcher(); }
+
+// Creates a polymorphic matcher that matches a 2-tuple where the
+// first field < the second field.
+inline internal::Lt2Matcher Lt() { return internal::Lt2Matcher(); }
+
+// Creates a polymorphic matcher that matches a 2-tuple where the
+// first field != the second field.
+inline internal::Ne2Matcher Ne() { return internal::Ne2Matcher(); }
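+
+// Illustrative usage sketch: the nullary comparison matchers above are
+// typically combined with Pointwise() ('lhs' and 'rhs' are hypothetical
+// vectors filled elsewhere with matching values):
+//   EXPECT_THAT(lhs, Pointwise(Eq(), rhs));  // lhs[i] == rhs[i] for all i.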
+
+// Creates a matcher that matches any value of type T that m doesn't
+// match.
+template <typename InnerMatcher>
+inline internal::NotMatcher<InnerMatcher> Not(InnerMatcher m) {
+  return internal::NotMatcher<InnerMatcher>(m);
+}
+
+// Returns a matcher that matches anything that satisfies the given
+// predicate.  The predicate can be any unary function or functor
+// whose return type can be implicitly converted to bool.
+template <typename Predicate>
+inline PolymorphicMatcher<internal::TrulyMatcher<Predicate> >
+Truly(Predicate pred) {
+  return MakePolymorphicMatcher(internal::TrulyMatcher<Predicate>(pred));
+}
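+
+// Illustrative usage sketch (IsEven is a hypothetical predicate):
+//   bool IsEven(int n) { return n % 2 == 0; }
+//   EXPECT_THAT(4, Truly(IsEven));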
+
+// Returns a matcher that matches the container size. The container must
+// support both size() and size_type, which all STL-like containers provide.
+// Note that the parameter 'size' can be a value of type size_type as well as
+// a matcher. For instance:
+//   EXPECT_THAT(container, SizeIs(2));      // Checks container has 2 elements.
+//   EXPECT_THAT(container, SizeIs(Le(2)));  // Checks container has at most 2.
+template <typename SizeMatcher>
+inline internal::SizeIsMatcher<SizeMatcher>
+SizeIs(const SizeMatcher& size_matcher) {
+  return internal::SizeIsMatcher<SizeMatcher>(size_matcher);
+}
+
+// Returns a matcher that matches the distance between the container's begin()
+// iterator and its end() iterator, i.e. the size of the container. This matcher
+// can be used instead of SizeIs with containers such as std::forward_list which
+// do not implement size(). The container must provide const_iterator (with
+// valid iterator_traits), begin() and end().
+template <typename DistanceMatcher>
+inline internal::BeginEndDistanceIsMatcher<DistanceMatcher>
+BeginEndDistanceIs(const DistanceMatcher& distance_matcher) {
+  return internal::BeginEndDistanceIsMatcher<DistanceMatcher>(distance_matcher);
+}
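+
+// Illustrative usage sketch ('items' is a hypothetical std::forward_list,
+// which has no size()):
+//   std::forward_list<int> items;
+//   items.push_front(1);
+//   items.push_front(2);
+//   EXPECT_THAT(items, BeginEndDistanceIs(2));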
+
+// Returns a matcher that matches an equal container.
+// This matcher behaves like Eq(), but in the event of mismatch lists the
+// values that are included in one container but not the other. (Duplicate
+// values and order differences are not explained.)
+template <typename Container>
+inline PolymorphicMatcher<internal::ContainerEqMatcher<  // NOLINT
+                            GTEST_REMOVE_CONST_(Container)> >
+    ContainerEq(const Container& rhs) {
+  // The following line works around a bug in MSVC 8.0, which sometimes
+  // causes Container to be a const type.
+  typedef GTEST_REMOVE_CONST_(Container) RawContainer;
+  return MakePolymorphicMatcher(
+      internal::ContainerEqMatcher<RawContainer>(rhs));
+}
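+
+// Illustrative usage sketch ('actual' and 'expected' are hypothetical):
+//   ::std::vector<int> actual;
+//   actual.push_back(1);
+//   ::std::vector<int> expected;
+//   expected.push_back(1);
+//   EXPECT_THAT(actual, ContainerEq(expected));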
+
+// Returns a matcher that matches a container that, when sorted using
+// the given comparator, matches container_matcher.
+template <typename Comparator, typename ContainerMatcher>
+inline internal::WhenSortedByMatcher<Comparator, ContainerMatcher>
+WhenSortedBy(const Comparator& comparator,
+             const ContainerMatcher& container_matcher) {
+  return internal::WhenSortedByMatcher<Comparator, ContainerMatcher>(
+      comparator, container_matcher);
+}
+
+// Returns a matcher that matches a container that, when sorted using
+// the < operator, matches container_matcher.
+template <typename ContainerMatcher>
+inline internal::WhenSortedByMatcher<internal::LessComparator, ContainerMatcher>
+WhenSorted(const ContainerMatcher& container_matcher) {
+  return
+      internal::WhenSortedByMatcher<internal::LessComparator, ContainerMatcher>(
+          internal::LessComparator(), container_matcher);
+}
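+
+// Illustrative usage sketch ('words' is a hypothetical vector):
+//   ::std::vector<std::string> words;
+//   words.push_back("banana");
+//   words.push_back("apple");
+//   EXPECT_THAT(words, WhenSorted(ElementsAre("apple", "banana")));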
+
+// Matches an STL-style container or a native array that contains the
+// same number of elements as in rhs, where its i-th element and rhs's
+// i-th element (as a pair) satisfy the given pair matcher, for all i.
+// TupleMatcher must be able to be safely cast to Matcher<tuple<const
+// T1&, const T2&> >, where T1 and T2 are the types of elements in the
+// LHS container and the RHS container respectively.
+template <typename TupleMatcher, typename Container>
+inline internal::PointwiseMatcher<TupleMatcher,
+                                  GTEST_REMOVE_CONST_(Container)>
+Pointwise(const TupleMatcher& tuple_matcher, const Container& rhs) {
+  // The following line works around a bug in MSVC 8.0, which sometimes
+  // causes Container to be a const type (e.g. when rhs is a const int[]).
+  typedef GTEST_REMOVE_CONST_(Container) RawContainer;
+  return internal::PointwiseMatcher<TupleMatcher, RawContainer>(
+      tuple_matcher, rhs);
+}
+
+#if GTEST_HAS_STD_INITIALIZER_LIST_
+
+// Supports the Pointwise(m, {a, b, c}) syntax.
+template <typename TupleMatcher, typename T>
+inline internal::PointwiseMatcher<TupleMatcher, std::vector<T> > Pointwise(
+    const TupleMatcher& tuple_matcher, std::initializer_list<T> rhs) {
+  return Pointwise(tuple_matcher, std::vector<T>(rhs));
+}
+
+#endif  // GTEST_HAS_STD_INITIALIZER_LIST_
+
+// UnorderedPointwise(pair_matcher, rhs) matches an STL-style
+// container or a native array that contains the same number of
+// elements as in rhs, where in some permutation of the container, its
+// i-th element and rhs's i-th element (as a pair) satisfy the given
+// pair matcher, for all i.  Tuple2Matcher must be able to be safely
+// cast to Matcher<tuple<const T1&, const T2&> >, where T1 and T2 are
+// the types of elements in the LHS container and the RHS container
+// respectively.
+//
+// This is like Pointwise(pair_matcher, rhs), except that the element
+// order doesn't matter.
+template <typename Tuple2Matcher, typename RhsContainer>
+inline internal::UnorderedElementsAreArrayMatcher<
+    typename internal::BoundSecondMatcher<
+        Tuple2Matcher, typename internal::StlContainerView<GTEST_REMOVE_CONST_(
+                           RhsContainer)>::type::value_type> >
+UnorderedPointwise(const Tuple2Matcher& tuple2_matcher,
+                   const RhsContainer& rhs_container) {
+  // The following line works around a bug in MSVC 8.0, which sometimes
+  // causes RhsContainer to be a const type (e.g. when rhs_container is a
+  // const int[]).
+  typedef GTEST_REMOVE_CONST_(RhsContainer) RawRhsContainer;
+
+  // RhsView allows the same code to handle RhsContainer being either an
+  // STL-style container or a native C-style array.
+  typedef typename internal::StlContainerView<RawRhsContainer> RhsView;
+  typedef typename RhsView::type RhsStlContainer;
+  typedef typename RhsStlContainer::value_type Second;
+  const RhsStlContainer& rhs_stl_container =
+      RhsView::ConstReference(rhs_container);
+
+  // Create a matcher for each element in rhs_container.
+  ::std::vector<internal::BoundSecondMatcher<Tuple2Matcher, Second> > matchers;
+  for (typename RhsStlContainer::const_iterator it = rhs_stl_container.begin();
+       it != rhs_stl_container.end(); ++it) {
+    matchers.push_back(
+        internal::MatcherBindSecond(tuple2_matcher, *it));
+  }
+
+  // Delegate the work to UnorderedElementsAreArray().
+  return UnorderedElementsAreArray(matchers);
+}
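+
+// Illustrative usage sketch ('actual' holds {3, 1, 2} and 'expected' holds
+// {1, 2, 3}; both are hypothetical vectors):
+//   EXPECT_THAT(actual, UnorderedPointwise(Eq(), expected));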
+
+#if GTEST_HAS_STD_INITIALIZER_LIST_
+
+// Supports the UnorderedPointwise(m, {a, b, c}) syntax.
+template <typename Tuple2Matcher, typename T>
+inline internal::UnorderedElementsAreArrayMatcher<
+    typename internal::BoundSecondMatcher<Tuple2Matcher, T> >
+UnorderedPointwise(const Tuple2Matcher& tuple2_matcher,
+                   std::initializer_list<T> rhs) {
+  return UnorderedPointwise(tuple2_matcher, std::vector<T>(rhs));
+}
+
+#endif  // GTEST_HAS_STD_INITIALIZER_LIST_
+
+// Matches an STL-style container or a native array that contains at
+// least one element matching the given value or matcher.
+//
+// Examples:
+//   ::std::set<int> page_ids;
+//   page_ids.insert(3);
+//   page_ids.insert(1);
+//   EXPECT_THAT(page_ids, Contains(1));
+//   EXPECT_THAT(page_ids, Contains(Gt(2)));
+//   EXPECT_THAT(page_ids, Not(Contains(4)));
+//
+//   ::std::map<int, size_t> page_lengths;
+//   page_lengths[1] = 100;
+//   EXPECT_THAT(page_lengths,
+//               Contains(::std::pair<const int, size_t>(1, 100)));
+//
+//   const char* user_ids[] = { "joe", "mike", "tom" };
+//   EXPECT_THAT(user_ids, Contains(Eq(::std::string("tom"))));
+template <typename M>
+inline internal::ContainsMatcher<M> Contains(M matcher) {
+  return internal::ContainsMatcher<M>(matcher);
+}
+
+// Matches an STL-style container or a native array that contains only
+// elements matching the given value or matcher.
+//
+// Each(m) is semantically equivalent to Not(Contains(Not(m))). Only
+// the messages are different.
+//
+// Examples:
+//   ::std::set<int> page_ids;
+//   // Each(m) matches an empty container, regardless of what m is.
+//   EXPECT_THAT(page_ids, Each(Eq(1)));
+//   EXPECT_THAT(page_ids, Each(Eq(77)));
+//
+//   page_ids.insert(3);
+//   EXPECT_THAT(page_ids, Each(Gt(0)));
+//   EXPECT_THAT(page_ids, Not(Each(Gt(4))));
+//   page_ids.insert(1);
+//   EXPECT_THAT(page_ids, Not(Each(Lt(2))));
+//
+//   ::std::map<int, size_t> page_lengths;
+//   page_lengths[1] = 100;
+//   page_lengths[2] = 200;
+//   page_lengths[3] = 300;
+//   EXPECT_THAT(page_lengths, Not(Each(Pair(1, 100))));
+//   EXPECT_THAT(page_lengths, Each(Key(Le(3))));
+//
+//   const char* user_ids[] = { "joe", "mike", "tom" };
+//   EXPECT_THAT(user_ids, Not(Each(Eq(::std::string("tom")))));
+template <typename M>
+inline internal::EachMatcher<M> Each(M matcher) {
+  return internal::EachMatcher<M>(matcher);
+}
+
+// Key(inner_matcher) matches an std::pair whose 'first' field matches
+// inner_matcher.  For example, Contains(Key(Ge(5))) can be used to match an
+// std::map that contains at least one element whose key is >= 5.
+template <typename M>
+inline internal::KeyMatcher<M> Key(M inner_matcher) {
+  return internal::KeyMatcher<M>(inner_matcher);
+}
+
+// Pair(first_matcher, second_matcher) matches a std::pair whose 'first' field
+// matches first_matcher and whose 'second' field matches second_matcher.  For
+// example, EXPECT_THAT(map_type, ElementsAre(Pair(Ge(5), "foo"))) can be used
+// to match a std::map<int, string> that contains exactly one element whose key
+// is >= 5 and whose value equals "foo".
+template <typename FirstMatcher, typename SecondMatcher>
+inline internal::PairMatcher<FirstMatcher, SecondMatcher>
+Pair(FirstMatcher first_matcher, SecondMatcher second_matcher) {
+  return internal::PairMatcher<FirstMatcher, SecondMatcher>(
+      first_matcher, second_matcher);
+}
+
+// Returns a predicate that is satisfied by anything that matches the
+// given matcher.
+template <typename M>
+inline internal::MatcherAsPredicate<M> Matches(M matcher) {
+  return internal::MatcherAsPredicate<M>(matcher);
+}
+
+// Returns true iff the value matches the matcher.
+template <typename T, typename M>
+inline bool Value(const T& value, M matcher) {
+  return testing::Matches(matcher)(value);
+}
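+
+// Illustrative usage sketch: Matches() adapts a matcher into a predicate and
+// Value() applies a matcher outside of an assertion ('ints' is a hypothetical
+// vector):
+//   const bool positive = Value(5, Gt(0));  // true
+//   const long count =
+//       std::count_if(ints.begin(), ints.end(), Matches(Gt(0)));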
+
+// Matches the value against the given matcher and explains the match
+// result to listener.
+template <typename T, typename M>
+inline bool ExplainMatchResult(
+    M matcher, const T& value, MatchResultListener* listener) {
+  return SafeMatcherCast<const T&>(matcher).MatchAndExplain(value, listener);
+}
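+
+// Illustrative usage sketch: ExplainMatchResult() is typically called inside a
+// MATCHER_P() definition to delegate to another matcher while preserving the
+// explanation (HasAbsoluteValue is a hypothetical matcher name):
+//   MATCHER_P(HasAbsoluteValue, expected, "") {
+//     return ExplainMatchResult(Eq(expected), arg < 0 ? -arg : arg,
+//                               result_listener);
+//   }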
+
+#if GTEST_LANG_CXX11
+// Define variadic matcher versions. They are overloaded in
+// gmock-generated-matchers.h for the cases supported by pre C++11 compilers.
+template <typename... Args>
+inline internal::AllOfMatcher<Args...> AllOf(const Args&... matchers) {
+  return internal::AllOfMatcher<Args...>(matchers...);
+}
+
+template <typename... Args>
+inline internal::AnyOfMatcher<Args...> AnyOf(const Args&... matchers) {
+  return internal::AnyOfMatcher<Args...>(matchers...);
+}
+
+#endif  // GTEST_LANG_CXX11
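+
+// Illustrative usage sketch ('n' is a hypothetical int):
+//   int n = 5;
+//   EXPECT_THAT(n, AllOf(Gt(0), Lt(10)));   // Both must hold.
+//   EXPECT_THAT(n, AnyOf(Eq(5), Eq(7)));    // At least one must hold.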
+
+// AllArgs(m) is a synonym of m.  This is useful in
+//
+//   EXPECT_CALL(foo, Bar(_, _)).With(AllArgs(Eq()));
+//
+// which is easier to read than
+//
+//   EXPECT_CALL(foo, Bar(_, _)).With(Eq());
+template <typename InnerMatcher>
+inline InnerMatcher AllArgs(const InnerMatcher& matcher) { return matcher; }
+
+// These macros allow using matchers to check values in Google Test
+// tests.  ASSERT_THAT(value, matcher) and EXPECT_THAT(value, matcher)
+// succeed iff the value matches the matcher.  If the assertion fails,
+// the value and the description of the matcher will be printed.
+#define ASSERT_THAT(value, matcher) ASSERT_PRED_FORMAT1(\
+    ::testing::internal::MakePredicateFormatterFromMatcher(matcher), value)
+#define EXPECT_THAT(value, matcher) EXPECT_PRED_FORMAT1(\
+    ::testing::internal::MakePredicateFormatterFromMatcher(matcher), value)
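+
+// Illustrative usage sketch ('name' is a hypothetical string):
+//   std::string name = "Frank";
+//   EXPECT_THAT(name, StartsWith("Fra"));   // Non-fatal on failure.
+//   ASSERT_THAT(name.size(), Gt(0u));       // Fatal on failure.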
+
+}  // namespace testing
+
+// Include any custom callback matchers added by the local installation.
+// We must include this header at the end to make sure it can use the
+// declarations from this file.
+// Copyright 2015, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// ============================================================
+// An installation-specific extension point for gmock-matchers.h.
+// ============================================================
+//
+// Adds google3 callback support to CallableTraits.
+//
+#ifndef GMOCK_INCLUDE_GMOCK_INTERNAL_CUSTOM_CALLBACK_MATCHERS_H_
+#define GMOCK_INCLUDE_GMOCK_INTERNAL_CUSTOM_CALLBACK_MATCHERS_H_
+
+#endif  //  GMOCK_INCLUDE_GMOCK_INTERNAL_CUSTOM_CALLBACK_MATCHERS_H_
+#endif  // GMOCK_INCLUDE_GMOCK_GMOCK_MATCHERS_H_
+
+namespace testing {
+
+// An abstract handle of an expectation.
+class Expectation;
+
+// A set of expectation handles.
+class ExpectationSet;
+
+// Anything inside the 'internal' namespace IS INTERNAL IMPLEMENTATION
+// and MUST NOT BE USED IN USER CODE!!!
+namespace internal {
+
+// Implements a mock function.
+template <typename F> class FunctionMocker;
+
+// Base class for expectations.
+class ExpectationBase;
+
+// Implements an expectation.
+template <typename F> class TypedExpectation;
+
+// Helper class for testing the Expectation class template.
+class ExpectationTester;
+
+// Base class for function mockers.
+template <typename F> class FunctionMockerBase;
+
+// Protects the mock object registry (in class Mock), all function
+// mockers, and all expectations.
+//
+// The reason we don't use more fine-grained protection is: when a
+// mock function Foo() is called, it needs to consult its expectations
+// to see which one should be picked.  If another thread is allowed to
+// call a mock function (either Foo() or a different one) at the same
+// time, it could affect the "retired" attributes of Foo()'s
+// expectations when InSequence() is used, and thus affect which
+// expectation gets picked.  Therefore, we sequence all mock function
+// calls to ensure the integrity of the mock objects' states.
+GTEST_API_ GTEST_DECLARE_STATIC_MUTEX_(g_gmock_mutex);
+
+// Untyped base class for ActionResultHolder<R>.
+class UntypedActionResultHolderBase;
+
+// Abstract base class of FunctionMockerBase.  This is the
+// type-agnostic part of the function mocker interface.  Its pure
+// virtual methods are implemented by FunctionMockerBase.
+class GTEST_API_ UntypedFunctionMockerBase {
+ public:
+  UntypedFunctionMockerBase();
+  virtual ~UntypedFunctionMockerBase();
+
+  // Verifies that all expectations on this mock function have been
+  // satisfied.  Reports one or more Google Test non-fatal failures
+  // and returns false if not.
+  bool VerifyAndClearExpectationsLocked()
+      GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex);
+
+  // Clears the ON_CALL()s set on this mock function.
+  virtual void ClearDefaultActionsLocked()
+      GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) = 0;
+
+  // In all of the following Untyped* functions, it's the caller's
+  // responsibility to guarantee the correctness of the arguments'
+  // types.
+
+  // Performs the default action with the given arguments and returns
+  // the action's result.  The call description string will be used in
+  // the error message to describe the call in the case the default
+  // action fails.
+  // L = *
+  virtual UntypedActionResultHolderBase* UntypedPerformDefaultAction(
+      const void* untyped_args,
+      const string& call_description) const = 0;
+
+  // Performs the given action with the given arguments and returns
+  // the action's result.
+  // L = *
+  virtual UntypedActionResultHolderBase* UntypedPerformAction(
+      const void* untyped_action,
+      const void* untyped_args) const = 0;
+
+  // Writes a message that the call is uninteresting (i.e. neither
+  // explicitly expected nor explicitly unexpected) to the given
+  // ostream.
+  virtual void UntypedDescribeUninterestingCall(
+      const void* untyped_args,
+      ::std::ostream* os) const
+          GTEST_LOCK_EXCLUDED_(g_gmock_mutex) = 0;
+
+  // Returns the expectation that matches the given function arguments
+  // (or NULL if there's no match); when a match is found,
+  // untyped_action is set to point to the action that should be
+  // performed (or NULL if the action is "do default"), and
+  // is_excessive is modified to indicate whether the call exceeds the
+  // expected number.
+  virtual const ExpectationBase* UntypedFindMatchingExpectation(
+      const void* untyped_args,
+      const void** untyped_action, bool* is_excessive,
+      ::std::ostream* what, ::std::ostream* why)
+          GTEST_LOCK_EXCLUDED_(g_gmock_mutex) = 0;
+
+  // Prints the given function arguments to the ostream.
+  virtual void UntypedPrintArgs(const void* untyped_args,
+                                ::std::ostream* os) const = 0;
+
+  // Sets the mock object this mock method belongs to, and registers
+  // this information in the global mock registry.  Will be called
+  // whenever an EXPECT_CALL() or ON_CALL() is executed on this mock
+  // method.
+  // TODO(wan@google.com): rename to SetAndRegisterOwner().
+  void RegisterOwner(const void* mock_obj)
+      GTEST_LOCK_EXCLUDED_(g_gmock_mutex);
+
+  // Sets the mock object this mock method belongs to, and sets the
+  // name of the mock function.  Will be called upon each invocation
+  // of this mock function.
+  void SetOwnerAndName(const void* mock_obj, const char* name)
+      GTEST_LOCK_EXCLUDED_(g_gmock_mutex);
+
+  // Returns the mock object this mock method belongs to.  Must be
+  // called after RegisterOwner() or SetOwnerAndName() has been
+  // called.
+  const void* MockObject() const
+      GTEST_LOCK_EXCLUDED_(g_gmock_mutex);
+
+  // Returns the name of this mock method.  Must be called after
+  // SetOwnerAndName() has been called.
+  const char* Name() const
+      GTEST_LOCK_EXCLUDED_(g_gmock_mutex);
+
+  // Returns the result of invoking this mock function with the given
+  // arguments.  This function can be safely called from multiple
+  // threads concurrently.  The caller is responsible for deleting the
+  // result.
+  UntypedActionResultHolderBase* UntypedInvokeWith(
+      const void* untyped_args)
+          GTEST_LOCK_EXCLUDED_(g_gmock_mutex);
+
+ protected:
+  typedef std::vector<const void*> UntypedOnCallSpecs;
+
+  typedef std::vector<internal::linked_ptr<ExpectationBase> >
+  UntypedExpectations;
+
+  // Returns an Expectation object that references and co-owns exp,
+  // which must be an expectation on this mock function.
+  Expectation GetHandleOf(ExpectationBase* exp);
+
+  // Address of the mock object this mock method belongs to.  Only
+  // valid after this mock method has been called or
+  // ON_CALL/EXPECT_CALL has been invoked on it.
+  const void* mock_obj_;  // Protected by g_gmock_mutex.
+
+  // Name of the function being mocked.  Only valid after this mock
+  // method has been called.
+  const char* name_;  // Protected by g_gmock_mutex.
+
+  // All default action specs for this function mocker.
+  UntypedOnCallSpecs untyped_on_call_specs_;
+
+  // All expectations for this function mocker.
+  UntypedExpectations untyped_expectations_;
+};  // class UntypedFunctionMockerBase
+
+// Untyped base class for OnCallSpec<F>.
+class UntypedOnCallSpecBase {
+ public:
+  // The arguments are the location of the ON_CALL() statement.
+  UntypedOnCallSpecBase(const char* a_file, int a_line)
+      : file_(a_file), line_(a_line), last_clause_(kNone) {}
+
+  // Where in the source file was the default action spec defined?
+  const char* file() const { return file_; }
+  int line() const { return line_; }
+
+ protected:
+  // Gives each clause in the ON_CALL() statement a name.
+  enum Clause {
+    // Do not change the order of the enum members!  The run-time
+    // syntax checking relies on it.
+    kNone,
+    kWith,
+    kWillByDefault
+  };
+
+  // Asserts that the ON_CALL() statement has a certain property.
+  void AssertSpecProperty(bool property, const string& failure_message) const {
+    Assert(property, file_, line_, failure_message);
+  }
+
+  // Expects that the ON_CALL() statement has a certain property.
+  void ExpectSpecProperty(bool property, const string& failure_message) const {
+    Expect(property, file_, line_, failure_message);
+  }
+
+  const char* file_;
+  int line_;
+
+  // The last clause in the ON_CALL() statement as seen so far.
+  // Initially kNone and changes as the statement is parsed.
+  Clause last_clause_;
+};  // class UntypedOnCallSpecBase
+
+// This template class implements an ON_CALL spec.
+template <typename F>
+class OnCallSpec : public UntypedOnCallSpecBase {
+ public:
+  typedef typename Function<F>::ArgumentTuple ArgumentTuple;
+  typedef typename Function<F>::ArgumentMatcherTuple ArgumentMatcherTuple;
+
+  // Constructs an OnCallSpec object from the information inside
+  // the parenthesis of an ON_CALL() statement.
+  OnCallSpec(const char* a_file, int a_line,
+             const ArgumentMatcherTuple& matchers)
+      : UntypedOnCallSpecBase(a_file, a_line),
+        matchers_(matchers),
+        // By default, extra_matcher_ should match anything.  However,
+        // we cannot initialize it with _ as that triggers a compiler
+        // bug in Symbian's C++ compiler (cannot decide between two
+        // overloaded constructors of Matcher<const ArgumentTuple&>).
+        extra_matcher_(A<const ArgumentTuple&>()) {
+  }
+
+  // Implements the .With() clause.
+  OnCallSpec& With(const Matcher<const ArgumentTuple&>& m) {
+    // Makes sure this is called at most once.
+    ExpectSpecProperty(last_clause_ < kWith,
+                       ".With() cannot appear "
+                       "more than once in an ON_CALL().");
+    last_clause_ = kWith;
+
+    extra_matcher_ = m;
+    return *this;
+  }
+
+  // Implements the .WillByDefault() clause.
+  OnCallSpec& WillByDefault(const Action<F>& action) {
+    ExpectSpecProperty(last_clause_ < kWillByDefault,
+                       ".WillByDefault() must appear "
+                       "exactly once in an ON_CALL().");
+    last_clause_ = kWillByDefault;
+
+    ExpectSpecProperty(!action.IsDoDefault(),
+                       "DoDefault() cannot be used in ON_CALL().");
+    action_ = action;
+    return *this;
+  }
+
+  // Returns true iff the given arguments match the matchers.
+  bool Matches(const ArgumentTuple& args) const {
+    return TupleMatches(matchers_, args) && extra_matcher_.Matches(args);
+  }
+
+  // Returns the action specified by the user.
+  const Action<F>& GetAction() const {
+    AssertSpecProperty(last_clause_ == kWillByDefault,
+                       ".WillByDefault() must appear exactly "
+                       "once in an ON_CALL().");
+    return action_;
+  }
+
+ private:
+  // The information in statement
+  //
+  //   ON_CALL(mock_object, Method(matchers))
+  //       .With(multi-argument-matcher)
+  //       .WillByDefault(action);
+  //
+  // is recorded in the data members like this:
+  //
+  //   source file that contains the statement => file_
+  //   line number of the statement            => line_
+  //   matchers                                => matchers_
+  //   multi-argument-matcher                  => extra_matcher_
+  //   action                                  => action_
+  ArgumentMatcherTuple matchers_;
+  Matcher<const ArgumentTuple&> extra_matcher_;
+  Action<F> action_;
+};  // class OnCallSpec
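+
+// A minimal usage sketch of what an OnCallSpec records (mock_foo and its
+// two-argument method Add() are hypothetical, not part of this header):
+//
+//   ON_CALL(mock_foo, Add(Gt(0), _))   // matchers_       <- (Gt(0), _)
+//       .With(Lt())                    // extra_matcher_  <- Lt()
+//       .WillByDefault(Return(42));    // action_         <- Return(42)
+//
+// GetAction() then yields Return(42) whenever Matches() accepts the
+// arguments of a call.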
+
+// Possible reactions on uninteresting calls.
+enum CallReaction {
+  kAllow,
+  kWarn,
+  kFail,
+  kDefault = kWarn  // By default, warn about uninteresting calls.
+};
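+
+// A minimal sketch of how these reactions are conventionally selected per
+// mock object (MockFoo is hypothetical; the wrapper templates are declared
+// as friends of class Mock below):
+//
+//   NiceMock<MockFoo> nice;      // uninteresting calls -> kAllow
+//   NaggyMock<MockFoo> naggy;    // uninteresting calls -> kWarn (kDefault)
+//   StrictMock<MockFoo> strict;  // uninteresting calls -> kFail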
+
+}  // namespace internal
+
+// Utilities for manipulating mock objects.
+class GTEST_API_ Mock {
+ public:
+  // The following public methods can be called concurrently.
+
+  // Tells Google Mock to ignore mock_obj when checking for leaked
+  // mock objects.
+  static void AllowLeak(const void* mock_obj)
+      GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex);
+
+  // Verifies and clears all expectations on the given mock object.
+  // If the expectations aren't satisfied, generates one or more
+  // Google Test non-fatal failures and returns false.
+  static bool VerifyAndClearExpectations(void* mock_obj)
+      GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex);
+
+  // Verifies all expectations on the given mock object and clears its
+  // default actions and expectations.  Returns true iff the
+  // verification was successful.
+  static bool VerifyAndClear(void* mock_obj)
+      GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex);
+
+ private:
+  friend class internal::UntypedFunctionMockerBase;
+
+  // Needed for a function mocker to register itself (so that we know
+  // how to clear a mock object).
+  template <typename F>
+  friend class internal::FunctionMockerBase;
+
+  template <typename M>
+  friend class NiceMock;
+
+  template <typename M>
+  friend class NaggyMock;
+
+  template <typename M>
+  friend class StrictMock;
+
+  // Tells Google Mock to allow uninteresting calls on the given mock
+  // object.
+  static void AllowUninterestingCalls(const void* mock_obj)
+      GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex);
+
+  // Tells Google Mock to warn the user about uninteresting calls on
+  // the given mock object.
+  static void WarnUninterestingCalls(const void* mock_obj)
+      GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex);
+
+  // Tells Google Mock to fail uninteresting calls on the given mock
+  // object.
+  static void FailUninterestingCalls(const void* mock_obj)
+      GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex);
+
+  // Tells Google Mock the given mock object is being destroyed and
+  // its entry in the call-reaction table should be removed.
+  static void UnregisterCallReaction(const void* mock_obj)
+      GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex);
+
+  // Returns the reaction Google Mock will have on uninteresting calls
+  // made on the given mock object.
+  static internal::CallReaction GetReactionOnUninterestingCalls(
+      const void* mock_obj)
+          GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex);
+
+  // Verifies that all expectations on the given mock object have been
+  // satisfied.  Reports one or more Google Test non-fatal failures
+  // and returns false if not.
+  static bool VerifyAndClearExpectationsLocked(void* mock_obj)
+      GTEST_EXCLUSIVE_LOCK_REQUIRED_(internal::g_gmock_mutex);
+
+  // Clears all ON_CALL()s set on the given mock object.
+  static void ClearDefaultActionsLocked(void* mock_obj)
+      GTEST_EXCLUSIVE_LOCK_REQUIRED_(internal::g_gmock_mutex);
+
+  // Registers a mock object and a mock method it owns.
+  static void Register(
+      const void* mock_obj,
+      internal::UntypedFunctionMockerBase* mocker)
+          GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex);
+
+  // Tells Google Mock where in the source code mock_obj is used in an
+  // ON_CALL or EXPECT_CALL.  In case mock_obj is leaked, this
+  // information helps the user identify which object it is.
+  static void RegisterUseByOnCallOrExpectCall(
+      const void* mock_obj, const char* file, int line)
+          GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex);
+
+  // Unregisters a mock method; removes the owning mock object from
+  // the registry when the last mock method associated with it has
+  // been unregistered.  This is called only in the destructor of
+  // FunctionMockerBase.
+  static void UnregisterLocked(internal::UntypedFunctionMockerBase* mocker)
+      GTEST_EXCLUSIVE_LOCK_REQUIRED_(internal::g_gmock_mutex);
+};  // class Mock
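+
+// A minimal usage sketch of the public utilities above (MockFoo and Bar()
+// are hypothetical):
+//
+//   MockFoo* mock_foo = new MockFoo;
+//   EXPECT_CALL(*mock_foo, Bar());
+//   ...
+//   // Verifies the EXPECT_CALL()s now, instead of at destruction time,
+//   // and resets the mock for further use.
+//   Mock::VerifyAndClearExpectations(mock_foo);
+//
+//   // Tells the leak checker not to report *mock_foo if it is never deleted.
+//   Mock::AllowLeak(mock_foo);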
+
+// An abstract handle of an expectation.  Useful in the .After()
+// clause of EXPECT_CALL() for setting the (partial) order of
+// expectations.  The syntax:
+//
+//   Expectation e1 = EXPECT_CALL(...)...;
+//   EXPECT_CALL(...).After(e1)...;
+//
+// sets two expectations where the latter can only be matched after
+// the former has been satisfied.
+//
+// Notes:
+//   - This class is copyable and has value semantics.
+//   - Constness is shallow: a const Expectation object itself cannot
+//     be modified, but the mutable methods of the ExpectationBase
+//     object it references can be called via expectation_base().
+//   - The constructors and destructor are defined out-of-line because
+//     the Symbian WINSCW compiler wants to otherwise instantiate them
+//     when it sees this class definition, at which point it doesn't have
+//     ExpectationBase available yet, leading to incorrect destruction
+//     in the linked_ptr (or compilation errors if using a checking
+//     linked_ptr).
+class GTEST_API_ Expectation {
+ public:
+  // Constructs a null object that doesn't reference any expectation.
+  Expectation();
+
+  ~Expectation();
+
+  // This single-argument ctor must not be explicit, in order to support the
+  //   Expectation e = EXPECT_CALL(...);
+  // syntax.
+  //
+  // A TypedExpectation object stores its pre-requisites as
+  // Expectation objects, and needs to call the non-const Retire()
+  // method on the ExpectationBase objects they reference.  Therefore
+  // Expectation must receive a *non-const* reference to the
+  // ExpectationBase object.
+  Expectation(internal::ExpectationBase& exp);  // NOLINT
+
+  // The compiler-generated copy ctor and operator= work exactly as
+  // intended, so we don't need to define our own.
+
+  // Returns true iff rhs references the same expectation as this object does.
+  bool operator==(const Expectation& rhs) const {
+    return expectation_base_ == rhs.expectation_base_;
+  }
+
+  bool operator!=(const Expectation& rhs) const { return !(*this == rhs); }
+
+ private:
+  friend class ExpectationSet;
+  friend class Sequence;
+  friend class ::testing::internal::ExpectationBase;
+  friend class ::testing::internal::UntypedFunctionMockerBase;
+
+  template <typename F>
+  friend class ::testing::internal::FunctionMockerBase;
+
+  template <typename F>
+  friend class ::testing::internal::TypedExpectation;
+
+  // This comparator is needed for putting Expectation objects into a set.
+  class Less {
+   public:
+    bool operator()(const Expectation& lhs, const Expectation& rhs) const {
+      return lhs.expectation_base_.get() < rhs.expectation_base_.get();
+    }
+  };
+
+  typedef ::std::set<Expectation, Less> Set;
+
+  Expectation(
+      const internal::linked_ptr<internal::ExpectationBase>& expectation_base);
+
+  // Returns the expectation this object references.
+  const internal::linked_ptr<internal::ExpectationBase>&
+  expectation_base() const {
+    return expectation_base_;
+  }
+
+  // A linked_ptr that co-owns the expectation this handle references.
+  internal::linked_ptr<internal::ExpectationBase> expectation_base_;
+};
+
+// A set of expectation handles.  Useful in the .After() clause of
+// EXPECT_CALL() for setting the (partial) order of expectations.  The
+// syntax:
+//
+//   ExpectationSet es;
+//   es += EXPECT_CALL(...)...;
+//   es += EXPECT_CALL(...)...;
+//   EXPECT_CALL(...).After(es)...;
+//
+// sets three expectations where the last one can only be matched
+// after the first two have both been satisfied.
+//
+// This class is copyable and has value semantics.
+class ExpectationSet {
+ public:
+  // A bidirectional iterator that can read a const element in the set.
+  typedef Expectation::Set::const_iterator const_iterator;
+
+  // An object stored in the set.  This is an alias of Expectation.
+  typedef Expectation::Set::value_type value_type;
+
+  // Constructs an empty set.
+  ExpectationSet() {}
+
+  // This single-argument ctor must not be explicit, in order to support the
+  //   ExpectationSet es = EXPECT_CALL(...);
+  // syntax.
+  ExpectationSet(internal::ExpectationBase& exp) {  // NOLINT
+    *this += Expectation(exp);
+  }
+
+  // This single-argument ctor implements implicit conversion from
+  // Expectation and thus must not be explicit.  This allows either an
+  // Expectation or an ExpectationSet to be used in .After().
+  ExpectationSet(const Expectation& e) {  // NOLINT
+    *this += e;
+  }
+
+  // The compiler-generated ctor and operator= work exactly as
+  // intended, so we don't need to define our own.
+
+  // Returns true iff rhs contains the same set of Expectation objects
+  // as this does.
+  bool operator==(const ExpectationSet& rhs) const {
+    return expectations_ == rhs.expectations_;
+  }
+
+  bool operator!=(const ExpectationSet& rhs) const { return !(*this == rhs); }
+
+  // Implements the syntax
+  //   expectation_set += EXPECT_CALL(...);
+  ExpectationSet& operator+=(const Expectation& e) {
+    expectations_.insert(e);
+    return *this;
+  }
+
+  int size() const { return static_cast<int>(expectations_.size()); }
+
+  const_iterator begin() const { return expectations_.begin(); }
+  const_iterator end() const { return expectations_.end(); }
+
+ private:
+  Expectation::Set expectations_;
+};
+
+
+// Sequence objects are used by a user to specify the relative order
+// in which the expectations should match.  They are copyable (we rely
+// on the compiler-defined copy constructor and assignment operator).
+class GTEST_API_ Sequence {
+ public:
+  // Constructs an empty sequence.
+  Sequence() : last_expectation_(new Expectation) {}
+
+  // Adds an expectation to this sequence.  The caller must ensure
+  // that no other thread is accessing this Sequence object.
+  void AddExpectation(const Expectation& expectation) const;
+
+ private:
+  // The last expectation in this sequence.  We use a linked_ptr here
+  // because Sequence objects are copyable and we want the copies to
+  // be aliases.  The linked_ptr allows the copies to co-own and share
+  // the same Expectation object.
+  internal::linked_ptr<Expectation> last_expectation_;
+};  // class Sequence
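+
+// A minimal sketch of the partial order two Sequence objects can express
+// (foo and bar are hypothetical mock objects):
+//
+//   Sequence s1, s2;
+//   EXPECT_CALL(foo, Reset()).InSequence(s1, s2);
+//   EXPECT_CALL(foo, GetSize()).InSequence(s1);
+//   EXPECT_CALL(bar, Describe()).InSequence(s2);
+//
+// Reset() must be called before both GetSize() and Describe(), while
+// GetSize() and Describe() may occur in either order relative to each other.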
+
+// An object of this type causes all EXPECT_CALL() statements
+// encountered in its scope to be put in an anonymous sequence.  The
+// work is done in the constructor and destructor.  You should only
+// create an InSequence object on the stack.
+//
+// The sole purpose for this class is to support easy definition of
+// sequential expectations, e.g.
+//
+//   {
+//     InSequence dummy;  // The name of the object doesn't matter.
+//
+//     // The following expectations must match in the order they appear.
+//     EXPECT_CALL(a, Bar())...;
+//     EXPECT_CALL(a, Baz())...;
+//     ...
+//     EXPECT_CALL(b, Xyz())...;
+//   }
+//
+// You can create InSequence objects in multiple threads, as long as
+// they are used to affect different mock objects.  The idea is that
+// each thread can create and set up its own mocks as if it's the only
+// thread.  However, for the clarity of your tests we recommend setting up
+// mocks in the main thread unless you have a good reason not to.
+class GTEST_API_ InSequence {
+ public:
+  InSequence();
+  ~InSequence();
+ private:
+  bool sequence_created_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(InSequence);  // NOLINT
+} GTEST_ATTRIBUTE_UNUSED_;
+
+namespace internal {
+
+// Points to the implicit sequence introduced by a living InSequence
+// object (if any) in the current thread or NULL.
+GTEST_API_ extern ThreadLocal<Sequence*> g_gmock_implicit_sequence;
+
+// Base class for implementing expectations.
+//
+// There are two reasons for having a type-agnostic base class for
+// Expectation:
+//
+//   1. We need to store collections of expectations of different
+//   types (e.g. all pre-requisites of a particular expectation, all
+//   expectations in a sequence).  Therefore these expectation objects
+//   must share a common base class.
+//
+//   2. We can avoid binary code bloat by moving methods not depending
+//   on the template argument of Expectation to the base class.
+//
+// This class is internal and mustn't be used by user code directly.
+class GTEST_API_ ExpectationBase {
+ public:
+  // source_text is the EXPECT_CALL(...) source that created this Expectation.
+  ExpectationBase(const char* file, int line, const string& source_text);
+
+  virtual ~ExpectationBase();
+
+  // Where in the source file was the expectation spec defined?
+  const char* file() const { return file_; }
+  int line() const { return line_; }
+  const char* source_text() const { return source_text_.c_str(); }
+  // Returns the cardinality specified in the expectation spec.
+  const Cardinality& cardinality() const { return cardinality_; }
+
+  // Describes the source file location of this expectation.
+  void DescribeLocationTo(::std::ostream* os) const {
+    *os << FormatFileLocation(file(), line()) << " ";
+  }
+
+  // Describes how many times a function call matching this
+  // expectation has occurred.
+  void DescribeCallCountTo(::std::ostream* os) const
+      GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex);
+
+  // If this mock method has an extra matcher (i.e. .With(matcher)),
+  // describes it to the ostream.
+  virtual void MaybeDescribeExtraMatcherTo(::std::ostream* os) = 0;
+
+ protected:
+  friend class ::testing::Expectation;
+  friend class UntypedFunctionMockerBase;
+
+  enum Clause {
+    // Don't change the order of the enum members!
+    kNone,
+    kWith,
+    kTimes,
+    kInSequence,
+    kAfter,
+    kWillOnce,
+    kWillRepeatedly,
+    kRetiresOnSaturation
+  };
+
+  typedef std::vector<const void*> UntypedActions;
+
+  // Returns an Expectation object that references and co-owns this
+  // expectation.
+  virtual Expectation GetHandle() = 0;
+
+  // Asserts that the EXPECT_CALL() statement has the given property.
+  void AssertSpecProperty(bool property, const string& failure_message) const {
+    Assert(property, file_, line_, failure_message);
+  }
+
+  // Expects that the EXPECT_CALL() statement has the given property.
+  void ExpectSpecProperty(bool property, const string& failure_message) const {
+    Expect(property, file_, line_, failure_message);
+  }
+
+  // Explicitly specifies the cardinality of this expectation.  Used
+  // by the subclasses to implement the .Times() clause.
+  void SpecifyCardinality(const Cardinality& cardinality);
+
+  // Returns true iff the user specified the cardinality explicitly
+  // using a .Times().
+  bool cardinality_specified() const { return cardinality_specified_; }
+
+  // Sets the cardinality of this expectation spec.
+  void set_cardinality(const Cardinality& a_cardinality) {
+    cardinality_ = a_cardinality;
+  }
+
+  // The following group of methods should only be called after the
+  // EXPECT_CALL() statement, and only when g_gmock_mutex is held by
+  // the current thread.
+
+  // Retires all pre-requisites of this expectation.
+  void RetireAllPreRequisites()
+      GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex);
+
+  // Returns true iff this expectation is retired.
+  bool is_retired() const
+      GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) {
+    g_gmock_mutex.AssertHeld();
+    return retired_;
+  }
+
+  // Retires this expectation.
+  void Retire()
+      GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) {
+    g_gmock_mutex.AssertHeld();
+    retired_ = true;
+  }
+
+  // Returns true iff this expectation is satisfied.
+  bool IsSatisfied() const
+      GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) {
+    g_gmock_mutex.AssertHeld();
+    return cardinality().IsSatisfiedByCallCount(call_count_);
+  }
+
+  // Returns true iff this expectation is saturated.
+  bool IsSaturated() const
+      GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) {
+    g_gmock_mutex.AssertHeld();
+    return cardinality().IsSaturatedByCallCount(call_count_);
+  }
+
+  // Returns true iff this expectation is over-saturated.
+  bool IsOverSaturated() const
+      GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) {
+    g_gmock_mutex.AssertHeld();
+    return cardinality().IsOverSaturatedByCallCount(call_count_);
+  }
+
+  // Returns true iff all pre-requisites of this expectation are satisfied.
+  bool AllPrerequisitesAreSatisfied() const
+      GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex);
+
+  // Adds unsatisfied pre-requisites of this expectation to 'result'.
+  void FindUnsatisfiedPrerequisites(ExpectationSet* result) const
+      GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex);
+
+  // Returns the number of times this expectation has been invoked.
+  int call_count() const
+      GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) {
+    g_gmock_mutex.AssertHeld();
+    return call_count_;
+  }
+
+  // Increments the number of times this expectation has been invoked.
+  void IncrementCallCount()
+      GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) {
+    g_gmock_mutex.AssertHeld();
+    call_count_++;
+  }
+
+  // Checks the action count (i.e. the number of WillOnce() and
+  // WillRepeatedly() clauses) against the cardinality if this hasn't
+  // been done before.  Prints a warning if there are too many or too
+  // few actions.
+  void CheckActionCountIfNotDone() const
+      GTEST_LOCK_EXCLUDED_(mutex_);
+
+  friend class ::testing::Sequence;
+  friend class ::testing::internal::ExpectationTester;
+
+  template <typename Function>
+  friend class TypedExpectation;
+
+  // Implements the .Times() clause.
+  void UntypedTimes(const Cardinality& a_cardinality);
+
+  // This group of fields is part of the spec and won't change after
+  // an EXPECT_CALL() statement finishes.
+  const char* file_;          // The file that contains the expectation.
+  int line_;                  // The line number of the expectation.
+  const string source_text_;  // The EXPECT_CALL(...) source text.
+  // True iff the cardinality is specified explicitly.
+  bool cardinality_specified_;
+  Cardinality cardinality_;            // The cardinality of the expectation.
+  // The immediate pre-requisites (i.e. expectations that must be
+  // satisfied before this expectation can be matched) of this
+  // expectation.  We use linked_ptr in the set because we want an
+  // Expectation object to be co-owned by its FunctionMocker and its
+  // successors.  This allows multiple mock objects to be deleted at
+  // different times.
+  ExpectationSet immediate_prerequisites_;
+
+  // This group of fields is the current state of the expectation,
+  // and can change as the mock function is called.
+  int call_count_;  // How many times this expectation has been invoked.
+  bool retired_;    // True iff this expectation has retired.
+  UntypedActions untyped_actions_;
+  bool extra_matcher_specified_;
+  bool repeated_action_specified_;  // True if a WillRepeatedly() was specified.
+  bool retires_on_saturation_;
+  Clause last_clause_;
+  mutable bool action_count_checked_;  // Under mutex_.
+  mutable Mutex mutex_;  // Protects action_count_checked_.
+
+  GTEST_DISALLOW_ASSIGN_(ExpectationBase);
+};  // class ExpectationBase
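+
+// A minimal sketch of how the cardinality drives the state queries above
+// (foo and Bar() are hypothetical):
+//
+//   EXPECT_CALL(foo, Bar()).Times(Between(2, 4));
+//
+//   call_count_ == 1:  !IsSatisfied(), !IsSaturated()
+//   call_count_ == 2:   IsSatisfied(), !IsSaturated()
+//   call_count_ == 4:   IsSatisfied(),  IsSaturated()
+//   call_count_ == 5:   IsOverSaturated() - the call is reported as excessive.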
+
+// Implements an expectation for the given function type.
+template <typename F>
+class TypedExpectation : public ExpectationBase {
+ public:
+  typedef typename Function<F>::ArgumentTuple ArgumentTuple;
+  typedef typename Function<F>::ArgumentMatcherTuple ArgumentMatcherTuple;
+  typedef typename Function<F>::Result Result;
+
+  TypedExpectation(FunctionMockerBase<F>* owner,
+                   const char* a_file, int a_line, const string& a_source_text,
+                   const ArgumentMatcherTuple& m)
+      : ExpectationBase(a_file, a_line, a_source_text),
+        owner_(owner),
+        matchers_(m),
+        // By default, extra_matcher_ should match anything.  However,
+        // we cannot initialize it with _ as that triggers a compiler
+        // bug in Symbian's C++ compiler (cannot decide between two
+        // overloaded constructors of Matcher<const ArgumentTuple&>).
+        extra_matcher_(A<const ArgumentTuple&>()),
+        repeated_action_(DoDefault()) {}
+
+  virtual ~TypedExpectation() {
+    // Check the validity of the action count if it hasn't been done
+    // yet (for example, if the expectation was never used).
+    CheckActionCountIfNotDone();
+    for (UntypedActions::const_iterator it = untyped_actions_.begin();
+         it != untyped_actions_.end(); ++it) {
+      delete static_cast<const Action<F>*>(*it);
+    }
+  }
+
+  // Implements the .With() clause.
+  TypedExpectation& With(const Matcher<const ArgumentTuple&>& m) {
+    if (last_clause_ == kWith) {
+      ExpectSpecProperty(false,
+                         ".With() cannot appear "
+                         "more than once in an EXPECT_CALL().");
+    } else {
+      ExpectSpecProperty(last_clause_ < kWith,
+                         ".With() must be the first "
+                         "clause in an EXPECT_CALL().");
+    }
+    last_clause_ = kWith;
+
+    extra_matcher_ = m;
+    extra_matcher_specified_ = true;
+    return *this;
+  }
+
+  // Implements the .Times() clause.
+  TypedExpectation& Times(const Cardinality& a_cardinality) {
+    ExpectationBase::UntypedTimes(a_cardinality);
+    return *this;
+  }
+
+  // Implements the .Times() clause.
+  TypedExpectation& Times(int n) {
+    return Times(Exactly(n));
+  }
+
+  // Implements the .InSequence() clause.
+  TypedExpectation& InSequence(const Sequence& s) {
+    ExpectSpecProperty(last_clause_ <= kInSequence,
+                       ".InSequence() cannot appear after .After(),"
+                       " .WillOnce(), .WillRepeatedly(), or "
+                       ".RetiresOnSaturation().");
+    last_clause_ = kInSequence;
+
+    s.AddExpectation(GetHandle());
+    return *this;
+  }
+  TypedExpectation& InSequence(const Sequence& s1, const Sequence& s2) {
+    return InSequence(s1).InSequence(s2);
+  }
+  TypedExpectation& InSequence(const Sequence& s1, const Sequence& s2,
+                               const Sequence& s3) {
+    return InSequence(s1, s2).InSequence(s3);
+  }
+  TypedExpectation& InSequence(const Sequence& s1, const Sequence& s2,
+                               const Sequence& s3, const Sequence& s4) {
+    return InSequence(s1, s2, s3).InSequence(s4);
+  }
+  TypedExpectation& InSequence(const Sequence& s1, const Sequence& s2,
+                               const Sequence& s3, const Sequence& s4,
+                               const Sequence& s5) {
+    return InSequence(s1, s2, s3, s4).InSequence(s5);
+  }
+
+  // Implements the .After() clause.
+  TypedExpectation& After(const ExpectationSet& s) {
+    ExpectSpecProperty(last_clause_ <= kAfter,
+                       ".After() cannot appear after .WillOnce(),"
+                       " .WillRepeatedly(), or "
+                       ".RetiresOnSaturation().");
+    last_clause_ = kAfter;
+
+    for (ExpectationSet::const_iterator it = s.begin(); it != s.end(); ++it) {
+      immediate_prerequisites_ += *it;
+    }
+    return *this;
+  }
+  TypedExpectation& After(const ExpectationSet& s1, const ExpectationSet& s2) {
+    return After(s1).After(s2);
+  }
+  TypedExpectation& After(const ExpectationSet& s1, const ExpectationSet& s2,
+                          const ExpectationSet& s3) {
+    return After(s1, s2).After(s3);
+  }
+  TypedExpectation& After(const ExpectationSet& s1, const ExpectationSet& s2,
+                          const ExpectationSet& s3, const ExpectationSet& s4) {
+    return After(s1, s2, s3).After(s4);
+  }
+  TypedExpectation& After(const ExpectationSet& s1, const ExpectationSet& s2,
+                          const ExpectationSet& s3, const ExpectationSet& s4,
+                          const ExpectationSet& s5) {
+    return After(s1, s2, s3, s4).After(s5);
+  }
+
+  // Implements the .WillOnce() clause.
+  TypedExpectation& WillOnce(const Action<F>& action) {
+    ExpectSpecProperty(last_clause_ <= kWillOnce,
+                       ".WillOnce() cannot appear after "
+                       ".WillRepeatedly() or .RetiresOnSaturation().");
+    last_clause_ = kWillOnce;
+
+    untyped_actions_.push_back(new Action<F>(action));
+    if (!cardinality_specified()) {
+      set_cardinality(Exactly(static_cast<int>(untyped_actions_.size())));
+    }
+    return *this;
+  }
+
+  // Implements the .WillRepeatedly() clause.
+  TypedExpectation& WillRepeatedly(const Action<F>& action) {
+    if (last_clause_ == kWillRepeatedly) {
+      ExpectSpecProperty(false,
+                         ".WillRepeatedly() cannot appear "
+                         "more than once in an EXPECT_CALL().");
+    } else {
+      ExpectSpecProperty(last_clause_ < kWillRepeatedly,
+                         ".WillRepeatedly() cannot appear "
+                         "after .RetiresOnSaturation().");
+    }
+    last_clause_ = kWillRepeatedly;
+    repeated_action_specified_ = true;
+
+    repeated_action_ = action;
+    if (!cardinality_specified()) {
+      set_cardinality(AtLeast(static_cast<int>(untyped_actions_.size())));
+    }
+
+    // Now that no more action clauses can be specified, we check
+    // whether their count makes sense.
+    CheckActionCountIfNotDone();
+    return *this;
+  }
+
+  // Implements the .RetiresOnSaturation() clause.
+  TypedExpectation& RetiresOnSaturation() {
+    ExpectSpecProperty(last_clause_ < kRetiresOnSaturation,
+                       ".RetiresOnSaturation() cannot appear "
+                       "more than once.");
+    last_clause_ = kRetiresOnSaturation;
+    retires_on_saturation_ = true;
+
+    // Now that no more action clauses can be specified, we check
+    // whether their count makes sense.
+    CheckActionCountIfNotDone();
+    return *this;
+  }
+
+  // Returns the matchers for the arguments as specified inside the
+  // EXPECT_CALL() macro.
+  const ArgumentMatcherTuple& matchers() const {
+    return matchers_;
+  }
+
+  // Returns the matcher specified by the .With() clause.
+  const Matcher<const ArgumentTuple&>& extra_matcher() const {
+    return extra_matcher_;
+  }
+
+  // Returns the action specified by the .WillRepeatedly() clause.
+  const Action<F>& repeated_action() const { return repeated_action_; }
+
+  // If this mock method has an extra matcher (i.e. .With(matcher)),
+  // describes it to the ostream.
+  virtual void MaybeDescribeExtraMatcherTo(::std::ostream* os) {
+    if (extra_matcher_specified_) {
+      *os << "    Expected args: ";
+      extra_matcher_.DescribeTo(os);
+      *os << "\n";
+    }
+  }
+
+ private:
+  template <typename Function>
+  friend class FunctionMockerBase;
+
+  // Returns an Expectation object that references and co-owns this
+  // expectation.
+  virtual Expectation GetHandle() {
+    return owner_->GetHandleOf(this);
+  }
+
+  // The following methods will be called only after the EXPECT_CALL()
+  // statement finishes and when the current thread holds
+  // g_gmock_mutex.
+
+  // Returns true iff this expectation matches the given arguments.
+  bool Matches(const ArgumentTuple& args) const
+      GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) {
+    g_gmock_mutex.AssertHeld();
+    return TupleMatches(matchers_, args) && extra_matcher_.Matches(args);
+  }
+
+  // Returns true iff this expectation should handle the given arguments.
+  bool ShouldHandleArguments(const ArgumentTuple& args) const
+      GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) {
+    g_gmock_mutex.AssertHeld();
+
+    // In case the action count wasn't checked when the expectation
+    // was defined (e.g. if this expectation has no WillRepeatedly()
+    // or RetiresOnSaturation() clause), we check it when the
+    // expectation is used for the first time.
+    CheckActionCountIfNotDone();
+    return !is_retired() && AllPrerequisitesAreSatisfied() && Matches(args);
+  }
+
+  // Describes the result of matching the arguments against this
+  // expectation to the given ostream.
+  void ExplainMatchResultTo(
+      const ArgumentTuple& args,
+      ::std::ostream* os) const
+          GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) {
+    g_gmock_mutex.AssertHeld();
+
+    if (is_retired()) {
+      *os << "         Expected: the expectation is active\n"
+          << "           Actual: it is retired\n";
+    } else if (!Matches(args)) {
+      if (!TupleMatches(matchers_, args)) {
+        ExplainMatchFailureTupleTo(matchers_, args, os);
+      }
+      StringMatchResultListener listener;
+      if (!extra_matcher_.MatchAndExplain(args, &listener)) {
+        *os << "    Expected args: ";
+        extra_matcher_.DescribeTo(os);
+        *os << "\n           Actual: don't match";
+
+        internal::PrintIfNotEmpty(listener.str(), os);
+        *os << "\n";
+      }
+    } else if (!AllPrerequisitesAreSatisfied()) {
+      *os << "         Expected: all pre-requisites are satisfied\n"
+          << "           Actual: the following immediate pre-requisites "
+          << "are not satisfied:\n";
+      ExpectationSet unsatisfied_prereqs;
+      FindUnsatisfiedPrerequisites(&unsatisfied_prereqs);
+      int i = 0;
+      for (ExpectationSet::const_iterator it = unsatisfied_prereqs.begin();
+           it != unsatisfied_prereqs.end(); ++it) {
+        it->expectation_base()->DescribeLocationTo(os);
+        *os << "pre-requisite #" << i++ << "\n";
+      }
+      *os << "                   (end of pre-requisites)\n";
+    } else {
+      // This line is here just for completeness' sake.  It will never
+      // be executed as currently the ExplainMatchResultTo() function
+      // is called only when the mock function call does NOT match the
+      // expectation.
+      *os << "The call matches the expectation.\n";
+    }
+  }
+
+  // Returns the action that should be taken for the current invocation.
+  const Action<F>& GetCurrentAction(
+      const FunctionMockerBase<F>* mocker,
+      const ArgumentTuple& args) const
+          GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) {
+    g_gmock_mutex.AssertHeld();
+    const int count = call_count();
+    Assert(count >= 1, __FILE__, __LINE__,
+           "call_count() is <= 0 when GetCurrentAction() is "
+           "called - this should never happen.");
+
+    const int action_count = static_cast<int>(untyped_actions_.size());
+    if (action_count > 0 && !repeated_action_specified_ &&
+        count > action_count) {
+      // If there is at least one WillOnce() and no WillRepeatedly(),
+      // we warn the user when the WillOnce() clauses ran out.
+      ::std::stringstream ss;
+      DescribeLocationTo(&ss);
+      ss << "Actions ran out in " << source_text() << "...\n"
+         << "Called " << count << " times, but only "
+         << action_count << " WillOnce()"
+         << (action_count == 1 ? " is" : "s are") << " specified - ";
+      mocker->DescribeDefaultActionTo(args, &ss);
+      Log(kWarning, ss.str(), 1);
+    }
+
+    return count <= action_count ?
+        *static_cast<const Action<F>*>(untyped_actions_[count - 1]) :
+        repeated_action();
+  }
+
+  // Given the arguments of a mock function call, if the call will
+  // over-saturate this expectation, returns the default action;
+  // otherwise, returns the next action in this expectation.  Also
+  // describes *what* happened to 'what', and explains *why* Google
+  // Mock does it to 'why'.  This method is not const as it calls
+  // IncrementCallCount().  A return value of NULL means the default
+  // action.
+  const Action<F>* GetActionForArguments(
+      const FunctionMockerBase<F>* mocker,
+      const ArgumentTuple& args,
+      ::std::ostream* what,
+      ::std::ostream* why)
+          GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) {
+    g_gmock_mutex.AssertHeld();
+    if (IsSaturated()) {
+      // We have an excessive call.
+      IncrementCallCount();
+      *what << "Mock function called more times than expected - ";
+      mocker->DescribeDefaultActionTo(args, what);
+      DescribeCallCountTo(why);
+
+      // TODO(wan@google.com): allow the user to control whether
+      // unexpected calls should fail immediately or continue using a
+      // flag --gmock_unexpected_calls_are_fatal.
+      return NULL;
+    }
+
+    IncrementCallCount();
+    RetireAllPreRequisites();
+
+    if (retires_on_saturation_ && IsSaturated()) {
+      Retire();
+    }
+
+    // Must be done after IncrementCallCount()!
+    *what << "Mock function call matches " << source_text() << "...\n";
+    return &(GetCurrentAction(mocker, args));
+  }
+
+  // All the fields below won't change once the EXPECT_CALL()
+  // statement finishes.
+  FunctionMockerBase<F>* const owner_;
+  ArgumentMatcherTuple matchers_;
+  Matcher<const ArgumentTuple&> extra_matcher_;
+  Action<F> repeated_action_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(TypedExpectation);
+};  // class TypedExpectation
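+
+// A minimal sketch of the clause ordering enforced above, and of the
+// cardinality inferred when .Times() is omitted (foo, its two-argument
+// method Bar(), the Sequences s1/s2, and the Expectation e1 are all
+// hypothetical):
+//
+//   EXPECT_CALL(foo, Bar(Gt(0), _))
+//       .With(Lt())                  // optional, at most once, must be first
+//       .Times(AtMost(5))            // optional, at most once
+//       .InSequence(s1, s2)          // optional, any number of times
+//       .After(e1)                   // optional, any number of times
+//       .WillOnce(Return(1))         // optional, any number of times
+//       .WillOnce(Return(2))
+//       .WillRepeatedly(Return(3))   // optional, at most once
+//       .RetiresOnSaturation();      // optional, at most once
+//
+// Without .Times(): n WillOnce()s and no WillRepeatedly() imply Exactly(n);
+// n WillOnce()s followed by a WillRepeatedly() imply AtLeast(n).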
+
+// A MockSpec object is used by ON_CALL() or EXPECT_CALL() for
+// specifying the default behavior of, or expectation on, a mock
+// function.
+
+// Note: class MockSpec really belongs to the ::testing namespace.
+// However if we define it in ::testing, MSVC will complain when
+// classes in ::testing::internal declare it as a friend class
+// template.  To workaround this compiler bug, we define MockSpec in
+// ::testing::internal and import it into ::testing.
+
+// Logs a message including file and line number information.
+GTEST_API_ void LogWithLocation(testing::internal::LogSeverity severity,
+                                const char* file, int line,
+                                const string& message);
+
+template <typename F>
+class MockSpec {
+ public:
+  typedef typename internal::Function<F>::ArgumentTuple ArgumentTuple;
+  typedef typename internal::Function<F>::ArgumentMatcherTuple
+      ArgumentMatcherTuple;
+
+  // Constructs a MockSpec object, given the function mocker object
+  // that the spec is associated with.
+  explicit MockSpec(internal::FunctionMockerBase<F>* function_mocker)
+      : function_mocker_(function_mocker) {}
+
+  // Adds a new default action spec to the function mocker and returns
+  // the newly created spec.
+  internal::OnCallSpec<F>& InternalDefaultActionSetAt(
+      const char* file, int line, const char* obj, const char* call) {
+    LogWithLocation(internal::kInfo, file, line,
+        string("ON_CALL(") + obj + ", " + call + ") invoked");
+    return function_mocker_->AddNewOnCallSpec(file, line, matchers_);
+  }
+
+  // Adds a new expectation spec to the function mocker and returns
+  // the newly created spec.
+  internal::TypedExpectation<F>& InternalExpectedAt(
+      const char* file, int line, const char* obj, const char* call) {
+    const string source_text(string("EXPECT_CALL(") + obj + ", " + call + ")");
+    LogWithLocation(internal::kInfo, file, line, source_text + " invoked");
+    return function_mocker_->AddNewExpectation(
+        file, line, source_text, matchers_);
+  }
+
+ private:
+  template <typename Function>
+  friend class internal::FunctionMocker;
+
+  void SetMatchers(const ArgumentMatcherTuple& matchers) {
+    matchers_ = matchers;
+  }
+
+  // The function mocker that owns this spec.
+  internal::FunctionMockerBase<F>* const function_mocker_;
+  // The argument matchers specified in the spec.
+  ArgumentMatcherTuple matchers_;
+
+  GTEST_DISALLOW_ASSIGN_(MockSpec);
+};  // class MockSpec
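+
+// A rough sketch of how the macros reach MockSpec in this version of Google
+// Mock (mock_foo and Bar are hypothetical):
+//
+//   ON_CALL(mock_foo, Bar(_))         // ~> mock_foo.gmock_Bar(_), a MockSpec
+//       .WillByDefault(Return(1));    //    whose matchers_ hold (_); the
+//                                     //    macro then calls
+//                                     //    InternalDefaultActionSetAt(),
+//                                     //    producing an OnCallSpec<F>.
+//
+// EXPECT_CALL() follows the same path via InternalExpectedAt(), which
+// produces a TypedExpectation<F> instead.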
+
+// Wrapper type for generically holding an ordinary value or lvalue reference.
+// If T is not a reference type, it must be copyable or movable.
+// ReferenceOrValueWrapper<T> is movable, and will also be copyable unless
+// T is a move-only value type (which means that it will always be copyable
+// if the current platform does not support move semantics).
+//
+// The primary template defines handling for values, but function header
+// comments describe the contract for the whole template (including
+// specializations).
+template <typename T>
+class ReferenceOrValueWrapper {
+ public:
+  // Constructs a wrapper from the given value/reference.
+  explicit ReferenceOrValueWrapper(T value)
+      : value_(::testing::internal::move(value)) {
+  }
+
+  // Unwraps and returns the underlying value/reference, exactly as
+  // originally passed. The behavior of calling this more than once on
+  // the same object is unspecified.
+  T Unwrap() { return ::testing::internal::move(value_); }
+
+  // Provides nondestructive access to the underlying value/reference.
+  // Always returns a const reference (more precisely,
+  // const RemoveReference<T>&). The behavior of calling this after
+  // calling Unwrap on the same object is unspecified.
+  const T& Peek() const {
+    return value_;
+  }
+
+ private:
+  T value_;
+};
+
+// Specialization for lvalue reference types. See primary template
+// for documentation.
+template <typename T>
+class ReferenceOrValueWrapper<T&> {
+ public:
+  // Workaround for debatable pass-by-reference lint warning (c-library-team
+  // policy precludes NOLINT in this context)
+  typedef T& reference;
+  explicit ReferenceOrValueWrapper(reference ref)
+      : value_ptr_(&ref) {}
+  T& Unwrap() { return *value_ptr_; }
+  const T& Peek() const { return *value_ptr_; }
+
+ private:
+  T* value_ptr_;
+};
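+
+// A minimal sketch of the difference between the two instantiations above
+// (illustrative values only):
+//
+//   int x = 5;
+//   ReferenceOrValueWrapper<int>  by_value(x);  // stores a copy of 5
+//   ReferenceOrValueWrapper<int&> by_ref(x);    // stores &x
+//   x = 7;
+//   by_value.Peek();  // still 5
+//   by_ref.Peek();    // 7; Unwrap() yields x itself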
+
+// MSVC warns about using 'this' in base member initializer list, so
+// we need to temporarily disable the warning.  We have to do it for
+// the entire class to suppress the warning, even though it's about
+// the constructor only.
+
+#ifdef _MSC_VER
+# pragma warning(push)          // Saves the current warning state.
+# pragma warning(disable:4355)  // Temporarily disables warning 4355.
+#endif  // _MSC_VER
+
+// C++ treats the void type specially.  For example, you cannot define
+// a void-typed variable or pass a void value to a function.
+// ActionResultHolder<T> holds a value of type T, where T must be a
+// copyable type or void (T doesn't need to be default-constructable).
+// It hides the syntactic difference between void and other types, and
+// is used to unify the code for invoking both void-returning and
+// non-void-returning mock functions.
+
+// Untyped base class for ActionResultHolder<T>.
+class UntypedActionResultHolderBase {
+ public:
+  virtual ~UntypedActionResultHolderBase() {}
+
+  // Prints the held value as an action's result to os.
+  virtual void PrintAsActionResult(::std::ostream* os) const = 0;
+};
+
+// This generic definition is used when T is not void.
+template <typename T>
+class ActionResultHolder : public UntypedActionResultHolderBase {
+ public:
+  // Returns the held value. Must not be called more than once.
+  T Unwrap() {
+    return result_.Unwrap();
+  }
+
+  // Prints the held value as an action's result to os.
+  virtual void PrintAsActionResult(::std::ostream* os) const {
+    *os << "\n          Returns: ";
+    // T may be a reference type, so we don't use UniversalPrint().
+    UniversalPrinter<T>::Print(result_.Peek(), os);
+  }
+
+  // Performs the given mock function's default action and returns the
+  // result in a new-ed ActionResultHolder.
+  template <typename F>
+  static ActionResultHolder* PerformDefaultAction(
+      const FunctionMockerBase<F>* func_mocker,
+      const typename Function<F>::ArgumentTuple& args,
+      const string& call_description) {
+    return new ActionResultHolder(Wrapper(
+        func_mocker->PerformDefaultAction(args, call_description)));
+  }
+
+  // Performs the given action and returns the result in a new-ed
+  // ActionResultHolder.
+  template <typename F>
+  static ActionResultHolder*
+  PerformAction(const Action<F>& action,
+                const typename Function<F>::ArgumentTuple& args) {
+    return new ActionResultHolder(Wrapper(action.Perform(args)));
+  }
+
+ private:
+  typedef ReferenceOrValueWrapper<T> Wrapper;
+
+  explicit ActionResultHolder(Wrapper result)
+      : result_(::testing::internal::move(result)) {
+  }
+
+  Wrapper result_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(ActionResultHolder);
+};
+
+// Specialization for T = void.
+template <>
+class ActionResultHolder<void> : public UntypedActionResultHolderBase {
+ public:
+  void Unwrap() { }
+
+  virtual void PrintAsActionResult(::std::ostream* /* os */) const {}
+
+  // Performs the given mock function's default action and returns ownership
+  // of an empty ActionResultHolder*.
+  template <typename F>
+  static ActionResultHolder* PerformDefaultAction(
+      const FunctionMockerBase<F>* func_mocker,
+      const typename Function<F>::ArgumentTuple& args,
+      const string& call_description) {
+    func_mocker->PerformDefaultAction(args, call_description);
+    return new ActionResultHolder;
+  }
+
+  // Performs the given action and returns ownership of an empty
+  // ActionResultHolder*.
+  template <typename F>
+  static ActionResultHolder* PerformAction(
+      const Action<F>& action,
+      const typename Function<F>::ArgumentTuple& args) {
+    action.Perform(args);
+    return new ActionResultHolder;
+  }
+
+ private:
+  ActionResultHolder() {}
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(ActionResultHolder);
+};
+
+// The base of the function mocker class for the given function type.
+// We put the methods in this class instead of its child to avoid code
+// bloat.
+template <typename F>
+class FunctionMockerBase : public UntypedFunctionMockerBase {
+ public:
+  typedef typename Function<F>::Result Result;
+  typedef typename Function<F>::ArgumentTuple ArgumentTuple;
+  typedef typename Function<F>::ArgumentMatcherTuple ArgumentMatcherTuple;
+
+  FunctionMockerBase() : current_spec_(this) {}
+
+  // The destructor verifies that all expectations on this mock
+  // function have been satisfied.  If not, it will report Google Test
+  // non-fatal failures for the violations.
+  virtual ~FunctionMockerBase()
+        GTEST_LOCK_EXCLUDED_(g_gmock_mutex) {
+    MutexLock l(&g_gmock_mutex);
+    VerifyAndClearExpectationsLocked();
+    Mock::UnregisterLocked(this);
+    ClearDefaultActionsLocked();
+  }
+
+  // Returns the ON_CALL spec that matches this mock function with the
+  // given arguments; returns NULL if no matching ON_CALL is found.
+  // L = *
+  const OnCallSpec<F>* FindOnCallSpec(
+      const ArgumentTuple& args) const {
+    for (UntypedOnCallSpecs::const_reverse_iterator it
+             = untyped_on_call_specs_.rbegin();
+         it != untyped_on_call_specs_.rend(); ++it) {
+      const OnCallSpec<F>* spec = static_cast<const OnCallSpec<F>*>(*it);
+      if (spec->Matches(args))
+        return spec;
+    }
+
+    return NULL;
+  }
+
+  // Performs the default action of this mock function on the given
+  // arguments and returns the result. Asserts (or throws if
+  // exceptions are enabled) with a helpful call description if there
+  // is no valid return value. This method doesn't depend on the
+  // mutable state of this object, and thus can be called concurrently
+  // without locking.
+  // L = *
+  Result PerformDefaultAction(const ArgumentTuple& args,
+                              const string& call_description) const {
+    const OnCallSpec<F>* const spec =
+        this->FindOnCallSpec(args);
+    if (spec != NULL) {
+      return spec->GetAction().Perform(args);
+    }
+    const string message = call_description +
+        "\n    The mock function has no default action "
+        "set, and its return type has no default value set.";
+#if GTEST_HAS_EXCEPTIONS
+    if (!DefaultValue<Result>::Exists()) {
+      throw std::runtime_error(message);
+    }
+#else
+    Assert(DefaultValue<Result>::Exists(), "", -1, message);
+#endif
+    return DefaultValue<Result>::Get();
+  }
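+
+  // A minimal sketch of the two ways a call can avoid the error branch above
+  // (mock_foo, Bar(), and SomeType are hypothetical; SomeType has no
+  // built-in default value):
+  //
+  //   ON_CALL(mock_foo, Bar(_)).WillByDefault(Return(some_value));
+  //     // FindOnCallSpec() succeeds and that action is performed.
+  //
+  //   DefaultValue<SomeType>::Set(some_value);
+  //     // DefaultValue<Result>::Exists() becomes true and Get() is returned.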
+
+  // Performs the default action with the given arguments and returns
+  // the action's result.  The call description string will be used in
+  // the error message to describe the call in the case the default
+  // action fails.  The caller is responsible for deleting the result.
+  // L = *
+  virtual UntypedActionResultHolderBase* UntypedPerformDefaultAction(
+      const void* untyped_args,  // must point to an ArgumentTuple
+      const string& call_description) const {
+    const ArgumentTuple& args =
+        *static_cast<const ArgumentTuple*>(untyped_args);
+    return ResultHolder::PerformDefaultAction(this, args, call_description);
+  }
+
+  // Performs the given action with the given arguments and returns
+  // the action's result.  The caller is responsible for deleting the
+  // result.
+  // L = *
+  virtual UntypedActionResultHolderBase* UntypedPerformAction(
+      const void* untyped_action, const void* untyped_args) const {
+    // Make a copy of the action before performing it, in case the
+    // action deletes the mock object (and thus deletes itself).
+    const Action<F> action = *static_cast<const Action<F>*>(untyped_action);
+    const ArgumentTuple& args =
+        *static_cast<const ArgumentTuple*>(untyped_args);
+    return ResultHolder::PerformAction(action, args);
+  }
+
+  // Implements UntypedFunctionMockerBase::ClearDefaultActionsLocked():
+  // clears the ON_CALL()s set on this mock function.
+  virtual void ClearDefaultActionsLocked()
+      GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) {
+    g_gmock_mutex.AssertHeld();
+
+    // Deleting our default actions may trigger other mock objects to be
+    // deleted, for example if an action contains a reference counted smart
+    // pointer to that mock object, and that is the last reference. So if we
+    // delete our actions within the context of the global mutex we may deadlock
+    // when this method is called again. Instead, make a copy of the set of
+    // actions to delete, clear our set within the mutex, and then delete the
+    // actions outside of the mutex.
+    UntypedOnCallSpecs specs_to_delete;
+    untyped_on_call_specs_.swap(specs_to_delete);
+
+    g_gmock_mutex.Unlock();
+    for (UntypedOnCallSpecs::const_iterator it =
+             specs_to_delete.begin();
+         it != specs_to_delete.end(); ++it) {
+      delete static_cast<const OnCallSpec<F>*>(*it);
+    }
+
+    // Lock the mutex again, since the caller expects it to be locked when we
+    // return.
+    g_gmock_mutex.Lock();
+  }
+
+ protected:
+  template <typename Function>
+  friend class MockSpec;
+
+  typedef ActionResultHolder<Result> ResultHolder;
+
+  // Returns the result of invoking this mock function with the given
+  // arguments.  This function can be safely called from multiple
+  // threads concurrently.
+  Result InvokeWith(const ArgumentTuple& args)
+        GTEST_LOCK_EXCLUDED_(g_gmock_mutex) {
+    scoped_ptr<ResultHolder> holder(
+        DownCast_<ResultHolder*>(this->UntypedInvokeWith(&args)));
+    return holder->Unwrap();
+  }
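+
+  // A rough sketch of how a generated mock method reaches InvokeWith()
+  // (MockFoo::Bar is hypothetical; the actual code is emitted by the
+  // MOCK_METHOD* macros):
+  //
+  //   int MockFoo::Bar(int a1) {
+  //     // The generated body packs a1 into an ArgumentTuple and ends up
+  //     // calling InvokeWith(args) above, which runs the matching
+  //     // expectation's action and unwraps its result.
+  //     ...
+  //   }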
+
+  // Adds and returns a default action spec for this mock function.
+  OnCallSpec<F>& AddNewOnCallSpec(
+      const char* file, int line,
+      const ArgumentMatcherTuple& m)
+          GTEST_LOCK_EXCLUDED_(g_gmock_mutex) {
+    Mock::RegisterUseByOnCallOrExpectCall(MockObject(), file, line);
+    OnCallSpec<F>* const on_call_spec = new OnCallSpec<F>(file, line, m);
+    untyped_on_call_specs_.push_back(on_call_spec);
+    return *on_call_spec;
+  }
+
+  // Adds and returns an expectation spec for this mock function.
+  TypedExpectation<F>& AddNewExpectation(
+      const char* file,
+      int line,
+      const string& source_text,
+      const ArgumentMatcherTuple& m)
+          GTEST_LOCK_EXCLUDED_(g_gmock_mutex) {
+    Mock::RegisterUseByOnCallOrExpectCall(MockObject(), file, line);
+    TypedExpectation<F>* const expectation =
+        new TypedExpectation<F>(this, file, line, source_text, m);
+    const linked_ptr<ExpectationBase> untyped_expectation(expectation);
+    untyped_expectations_.push_back(untyped_expectation);
+
+    // Adds this expectation into the implicit sequence if there is one.
+    Sequence* const implicit_sequence = g_gmock_implicit_sequence.get();
+    if (implicit_sequence != NULL) {
+      implicit_sequence->AddExpectation(Expectation(untyped_expectation));
+    }
+
+    return *expectation;
+  }
+
+  // The current spec (either default action spec or expectation spec)
+  // being described on this function mocker.
+  MockSpec<F>& current_spec() { return current_spec_; }
+
+ private:
+  template <typename Func> friend class TypedExpectation;
+
+  // Some utilities needed for implementing UntypedInvokeWith().
+
+  // Describes what default action will be performed for the given
+  // arguments.
+  // L = *
+  void DescribeDefaultActionTo(const ArgumentTuple& args,
+                               ::std::ostream* os) const {
+    const OnCallSpec<F>* const spec = FindOnCallSpec(args);
+
+    if (spec == NULL) {
+      *os << (internal::type_equals<Result, void>::value ?
+              "returning directly.\n" :
+              "returning default value.\n");
+    } else {
+      *os << "taking default action specified at:\n"
+          << FormatFileLocation(spec->file(), spec->line()) << "\n";
+    }
+  }
+
+  // Writes a message that the call is uninteresting (i.e. neither
+  // explicitly expected nor explicitly unexpected) to the given
+  // ostream.
+  virtual void UntypedDescribeUninterestingCall(
+      const void* untyped_args,
+      ::std::ostream* os) const
+          GTEST_LOCK_EXCLUDED_(g_gmock_mutex) {
+    const ArgumentTuple& args =
+        *static_cast<const ArgumentTuple*>(untyped_args);
+    *os << "Uninteresting mock function call - ";
+    DescribeDefaultActionTo(args, os);
+    *os << "    Function call: " << Name();
+    UniversalPrint(args, os);
+  }
+
+  // Returns the expectation that matches the given function arguments
+  // (or NULL if there's no match); when a match is found,
+  // untyped_action is set to point to the action that should be
+  // performed (or NULL if the action is "do default"), and
+  // is_excessive is modified to indicate whether the call exceeds the
+  // expected number.
+  //
+  // Critical section: We must find the matching expectation and the
+  // corresponding action that needs to be taken in an ATOMIC
+  // transaction.  Otherwise another thread may call this mock
+  // method in the middle and mess up the state.
+  //
+  // However, performing the action has to be left out of the critical
+  // section.  The reason is that we have no control on what the
+  // action does (it can invoke an arbitrary user function or even a
+  // mock function) and excessive locking could cause a deadlock.
+  virtual const ExpectationBase* UntypedFindMatchingExpectation(
+      const void* untyped_args,
+      const void** untyped_action, bool* is_excessive,
+      ::std::ostream* what, ::std::ostream* why)
+          GTEST_LOCK_EXCLUDED_(g_gmock_mutex) {
+    const ArgumentTuple& args =
+        *static_cast<const ArgumentTuple*>(untyped_args);
+    MutexLock l(&g_gmock_mutex);
+    TypedExpectation<F>* exp = this->FindMatchingExpectationLocked(args);
+    if (exp == NULL) {  // A match wasn't found.
+      this->FormatUnexpectedCallMessageLocked(args, what, why);
+      return NULL;
+    }
+
+    // This must be done before calling GetActionForArguments(),
+    // which will increment the call count for *exp and thus affect
+    // its saturation status.
+    *is_excessive = exp->IsSaturated();
+    const Action<F>* action = exp->GetActionForArguments(this, args, what, why);
+    if (action != NULL && action->IsDoDefault())
+      action = NULL;  // Normalize "do default" to NULL.
+    *untyped_action = action;
+    return exp;
+  }
+
+  // Prints the given function arguments to the ostream.
+  virtual void UntypedPrintArgs(const void* untyped_args,
+                                ::std::ostream* os) const {
+    const ArgumentTuple& args =
+        *static_cast<const ArgumentTuple*>(untyped_args);
+    UniversalPrint(args, os);
+  }
+
+  // Returns the expectation that matches the arguments, or NULL if no
+  // expectation matches them.
+  TypedExpectation<F>* FindMatchingExpectationLocked(
+      const ArgumentTuple& args) const
+          GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) {
+    g_gmock_mutex.AssertHeld();
+    for (typename UntypedExpectations::const_reverse_iterator it =
+             untyped_expectations_.rbegin();
+         it != untyped_expectations_.rend(); ++it) {
+      TypedExpectation<F>* const exp =
+          static_cast<TypedExpectation<F>*>(it->get());
+      if (exp->ShouldHandleArguments(args)) {
+        return exp;
+      }
+    }
+    return NULL;
+  }
+
+  // Returns a message that the arguments don't match any expectation.
+  void FormatUnexpectedCallMessageLocked(
+      const ArgumentTuple& args,
+      ::std::ostream* os,
+      ::std::ostream* why) const
+          GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) {
+    g_gmock_mutex.AssertHeld();
+    *os << "\nUnexpected mock function call - ";
+    DescribeDefaultActionTo(args, os);
+    PrintTriedExpectationsLocked(args, why);
+  }
+
+  // Prints a list of expectations that have been tried against the
+  // current mock function call.
+  void PrintTriedExpectationsLocked(
+      const ArgumentTuple& args,
+      ::std::ostream* why) const
+          GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) {
+    g_gmock_mutex.AssertHeld();
+    const int count = static_cast<int>(untyped_expectations_.size());
+    *why << "Google Mock tried the following " << count << " "
+         << (count == 1 ? "expectation, but it didn't match" :
+             "expectations, but none matched")
+         << ":\n";
+    for (int i = 0; i < count; i++) {
+      TypedExpectation<F>* const expectation =
+          static_cast<TypedExpectation<F>*>(untyped_expectations_[i].get());
+      *why << "\n";
+      expectation->DescribeLocationTo(why);
+      if (count > 1) {
+        *why << "tried expectation #" << i << ": ";
+      }
+      *why << expectation->source_text() << "...\n";
+      expectation->ExplainMatchResultTo(args, why);
+      expectation->DescribeCallCountTo(why);
+    }
+  }
+
+  // The current spec (either default action spec or expectation spec)
+  // being described on this function mocker.
+  MockSpec<F> current_spec_;
+
+  // There is no generally useful and implementable semantics of
+  // copying a mock object, so copying a mock is usually a user error.
+  // Thus we disallow copying function mockers.  If the user really
+  // wants to copy a mock object, he should implement his own copy
+  // operation, for example:
+  //
+  //   class MockFoo : public Foo {
+  //    public:
+  //     // Defines a copy constructor explicitly.
+  //     MockFoo(const MockFoo& src) {}
+  //     ...
+  //   };
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(FunctionMockerBase);
+};  // class FunctionMockerBase
+
+#ifdef _MSC_VER
+# pragma warning(pop)  // Restores the warning state.
+#endif  // _MSC_VER
+
+// Implements methods of FunctionMockerBase.
+
+// Verifies that all expectations on this mock function have been
+// satisfied.  Reports one or more Google Test non-fatal failures and
+// returns false if not.
+
+// Reports an uninteresting call (whose description is in msg) in the
+// manner specified by 'reaction'.
+void ReportUninterestingCall(CallReaction reaction, const string& msg);
+
+}  // namespace internal
+
+// The style guide prohibits "using" statements in a namespace scope
+// inside a header file.  However, the MockSpec class template is
+// meant to be defined in the ::testing namespace.  The following line
+// is just a trick for working around a bug in MSVC 8.0, which cannot
+// handle it if we define MockSpec in ::testing.
+using internal::MockSpec;
+
+// Const(x) is a convenient function for obtaining a const reference
+// to x.  This is useful for setting expectations on an overloaded
+// const mock method, e.g.
+//
+//   class MockFoo : public FooInterface {
+//    public:
+//     MOCK_METHOD0(Bar, int());
+//     MOCK_CONST_METHOD0(Bar, int&());
+//   };
+//
+//   MockFoo foo;
+//   // Expects a call to non-const MockFoo::Bar().
+//   EXPECT_CALL(foo, Bar());
+//   // Expects a call to const MockFoo::Bar().
+//   EXPECT_CALL(Const(foo), Bar());
+template <typename T>
+inline const T& Const(const T& x) { return x; }
+
+// Constructs an Expectation object that references and co-owns exp.
+inline Expectation::Expectation(internal::ExpectationBase& exp)  // NOLINT
+    : expectation_base_(exp.GetHandle().expectation_base()) {}
+
+}  // namespace testing
+
+// A separate macro is required to avoid compile errors when the name
+// of the method used in call is a result of macro expansion.
+// See CompilesWithMethodNameExpandedFromMacro tests in
+// internal/gmock-spec-builders_test.cc for more details.
+#define GMOCK_ON_CALL_IMPL_(obj, call) \
+    ((obj).gmock_##call).InternalDefaultActionSetAt(__FILE__, __LINE__, \
+                                                    #obj, #call)
+#define ON_CALL(obj, call) GMOCK_ON_CALL_IMPL_(obj, call)
+
+#define GMOCK_EXPECT_CALL_IMPL_(obj, call) \
+    ((obj).gmock_##call).InternalExpectedAt(__FILE__, __LINE__, #obj, #call)
+#define EXPECT_CALL(obj, call) GMOCK_EXPECT_CALL_IMPL_(obj, call)
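+
+// For example (foo and BAR_METHOD are hypothetical, not part of Google
+// Mock), the extra level of indirection is what lets the method name
+// itself be produced by a macro:
+//
+//   #define BAR_METHOD Bar
+//   EXPECT_CALL(foo, BAR_METHOD(1));  // BAR_METHOD expands to Bar before
+//                                     // the gmock_ prefix is pasted on.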
+
+#endif  // GMOCK_INCLUDE_GMOCK_GMOCK_SPEC_BUILDERS_H_
+
+#if GTEST_HAS_STD_FUNCTION_
+# include <functional>
+#endif
+
+namespace testing {
+namespace internal {
+
+template <typename F>
+class FunctionMockerBase;
+
+// Note: class FunctionMocker really belongs to the ::testing
+// namespace.  However if we define it in ::testing, MSVC will
+// complain when classes in ::testing::internal declare it as a
+// friend class template.  To workaround this compiler bug, we define
+// FunctionMocker in ::testing::internal and import it into ::testing.
+template <typename F>
+class FunctionMocker;
+
+template <typename R>
+class FunctionMocker<R()> : public
+    internal::FunctionMockerBase<R()> {
+ public:
+  typedef R F();
+  typedef typename internal::Function<F>::ArgumentTuple ArgumentTuple;
+
+  MockSpec<F>& With() {
+    return this->current_spec();
+  }
+
+  R Invoke() {
+    // Even though gcc and MSVC don't enforce it, 'this->' is required
+    // by the C++ standard [14.6.4] here, as the base class type is
+    // dependent on the template argument (and thus shouldn't be
+    // looked into when resolving InvokeWith).
+    return this->InvokeWith(ArgumentTuple());
+  }
+};
+
+template <typename R, typename A1>
+class FunctionMocker<R(A1)> : public
+    internal::FunctionMockerBase<R(A1)> {
+ public:
+  typedef R F(A1);
+  typedef typename internal::Function<F>::ArgumentTuple ArgumentTuple;
+
+  MockSpec<F>& With(const Matcher<A1>& m1) {
+    this->current_spec().SetMatchers(::testing::make_tuple(m1));
+    return this->current_spec();
+  }
+
+  R Invoke(A1 a1) {
+    // Even though gcc and MSVC don't enforce it, 'this->' is required
+    // by the C++ standard [14.6.4] here, as the base class type is
+    // dependent on the template argument (and thus shouldn't be
+    // looked into when resolving InvokeWith).
+    return this->InvokeWith(ArgumentTuple(a1));
+  }
+};
+
+template <typename R, typename A1, typename A2>
+class FunctionMocker<R(A1, A2)> : public
+    internal::FunctionMockerBase<R(A1, A2)> {
+ public:
+  typedef R F(A1, A2);
+  typedef typename internal::Function<F>::ArgumentTuple ArgumentTuple;
+
+  MockSpec<F>& With(const Matcher<A1>& m1, const Matcher<A2>& m2) {
+    this->current_spec().SetMatchers(::testing::make_tuple(m1, m2));
+    return this->current_spec();
+  }
+
+  R Invoke(A1 a1, A2 a2) {
+    // Even though gcc and MSVC don't enforce it, 'this->' is required
+    // by the C++ standard [14.6.4] here, as the base class type is
+    // dependent on the template argument (and thus shouldn't be
+    // looked into when resolving InvokeWith).
+    return this->InvokeWith(ArgumentTuple(a1, a2));
+  }
+};
+
+template <typename R, typename A1, typename A2, typename A3>
+class FunctionMocker<R(A1, A2, A3)> : public
+    internal::FunctionMockerBase<R(A1, A2, A3)> {
+ public:
+  typedef R F(A1, A2, A3);
+  typedef typename internal::Function<F>::ArgumentTuple ArgumentTuple;
+
+  MockSpec<F>& With(const Matcher<A1>& m1, const Matcher<A2>& m2,
+      const Matcher<A3>& m3) {
+    this->current_spec().SetMatchers(::testing::make_tuple(m1, m2, m3));
+    return this->current_spec();
+  }
+
+  R Invoke(A1 a1, A2 a2, A3 a3) {
+    // Even though gcc and MSVC don't enforce it, 'this->' is required
+    // by the C++ standard [14.6.4] here, as the base class type is
+    // dependent on the template argument (and thus shouldn't be
+    // looked into when resolving InvokeWith).
+    return this->InvokeWith(ArgumentTuple(a1, a2, a3));
+  }
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4>
+class FunctionMocker<R(A1, A2, A3, A4)> : public
+    internal::FunctionMockerBase<R(A1, A2, A3, A4)> {
+ public:
+  typedef R F(A1, A2, A3, A4);
+  typedef typename internal::Function<F>::ArgumentTuple ArgumentTuple;
+
+  MockSpec<F>& With(const Matcher<A1>& m1, const Matcher<A2>& m2,
+      const Matcher<A3>& m3, const Matcher<A4>& m4) {
+    this->current_spec().SetMatchers(::testing::make_tuple(m1, m2, m3, m4));
+    return this->current_spec();
+  }
+
+  R Invoke(A1 a1, A2 a2, A3 a3, A4 a4) {
+    // Even though gcc and MSVC don't enforce it, 'this->' is required
+    // by the C++ standard [14.6.4] here, as the base class type is
+    // dependent on the template argument (and thus shouldn't be
+    // looked into when resolving InvokeWith).
+    return this->InvokeWith(ArgumentTuple(a1, a2, a3, a4));
+  }
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5>
+class FunctionMocker<R(A1, A2, A3, A4, A5)> : public
+    internal::FunctionMockerBase<R(A1, A2, A3, A4, A5)> {
+ public:
+  typedef R F(A1, A2, A3, A4, A5);
+  typedef typename internal::Function<F>::ArgumentTuple ArgumentTuple;
+
+  MockSpec<F>& With(const Matcher<A1>& m1, const Matcher<A2>& m2,
+      const Matcher<A3>& m3, const Matcher<A4>& m4, const Matcher<A5>& m5) {
+    this->current_spec().SetMatchers(::testing::make_tuple(m1, m2, m3, m4, m5));
+    return this->current_spec();
+  }
+
+  R Invoke(A1 a1, A2 a2, A3 a3, A4 a4, A5 a5) {
+    // Even though gcc and MSVC don't enforce it, 'this->' is required
+    // by the C++ standard [14.6.4] here, as the base class type is
+    // dependent on the template argument (and thus shouldn't be
+    // looked into when resolving InvokeWith).
+    return this->InvokeWith(ArgumentTuple(a1, a2, a3, a4, a5));
+  }
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5, typename A6>
+class FunctionMocker<R(A1, A2, A3, A4, A5, A6)> : public
+    internal::FunctionMockerBase<R(A1, A2, A3, A4, A5, A6)> {
+ public:
+  typedef R F(A1, A2, A3, A4, A5, A6);
+  typedef typename internal::Function<F>::ArgumentTuple ArgumentTuple;
+
+  MockSpec<F>& With(const Matcher<A1>& m1, const Matcher<A2>& m2,
+      const Matcher<A3>& m3, const Matcher<A4>& m4, const Matcher<A5>& m5,
+      const Matcher<A6>& m6) {
+    this->current_spec().SetMatchers(::testing::make_tuple(m1, m2, m3, m4, m5,
+        m6));
+    return this->current_spec();
+  }
+
+  R Invoke(A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6) {
+    // Even though gcc and MSVC don't enforce it, 'this->' is required
+    // by the C++ standard [14.6.4] here, as the base class type is
+    // dependent on the template argument (and thus shouldn't be
+    // looked into when resolving InvokeWith).
+    return this->InvokeWith(ArgumentTuple(a1, a2, a3, a4, a5, a6));
+  }
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5, typename A6, typename A7>
+class FunctionMocker<R(A1, A2, A3, A4, A5, A6, A7)> : public
+    internal::FunctionMockerBase<R(A1, A2, A3, A4, A5, A6, A7)> {
+ public:
+  typedef R F(A1, A2, A3, A4, A5, A6, A7);
+  typedef typename internal::Function<F>::ArgumentTuple ArgumentTuple;
+
+  MockSpec<F>& With(const Matcher<A1>& m1, const Matcher<A2>& m2,
+      const Matcher<A3>& m3, const Matcher<A4>& m4, const Matcher<A5>& m5,
+      const Matcher<A6>& m6, const Matcher<A7>& m7) {
+    this->current_spec().SetMatchers(::testing::make_tuple(m1, m2, m3, m4, m5,
+        m6, m7));
+    return this->current_spec();
+  }
+
+  R Invoke(A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6, A7 a7) {
+    // Even though gcc and MSVC don't enforce it, 'this->' is required
+    // by the C++ standard [14.6.4] here, as the base class type is
+    // dependent on the template argument (and thus shouldn't be
+    // looked into when resolving InvokeWith).
+    return this->InvokeWith(ArgumentTuple(a1, a2, a3, a4, a5, a6, a7));
+  }
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5, typename A6, typename A7, typename A8>
+class FunctionMocker<R(A1, A2, A3, A4, A5, A6, A7, A8)> : public
+    internal::FunctionMockerBase<R(A1, A2, A3, A4, A5, A6, A7, A8)> {
+ public:
+  typedef R F(A1, A2, A3, A4, A5, A6, A7, A8);
+  typedef typename internal::Function<F>::ArgumentTuple ArgumentTuple;
+
+  MockSpec<F>& With(const Matcher<A1>& m1, const Matcher<A2>& m2,
+      const Matcher<A3>& m3, const Matcher<A4>& m4, const Matcher<A5>& m5,
+      const Matcher<A6>& m6, const Matcher<A7>& m7, const Matcher<A8>& m8) {
+    this->current_spec().SetMatchers(::testing::make_tuple(m1, m2, m3, m4, m5,
+        m6, m7, m8));
+    return this->current_spec();
+  }
+
+  R Invoke(A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6, A7 a7, A8 a8) {
+    // Even though gcc and MSVC don't enforce it, 'this->' is required
+    // by the C++ standard [14.6.4] here, as the base class type is
+    // dependent on the template argument (and thus shouldn't be
+    // looked into when resolving InvokeWith).
+    return this->InvokeWith(ArgumentTuple(a1, a2, a3, a4, a5, a6, a7, a8));
+  }
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5, typename A6, typename A7, typename A8, typename A9>
+class FunctionMocker<R(A1, A2, A3, A4, A5, A6, A7, A8, A9)> : public
+    internal::FunctionMockerBase<R(A1, A2, A3, A4, A5, A6, A7, A8, A9)> {
+ public:
+  typedef R F(A1, A2, A3, A4, A5, A6, A7, A8, A9);
+  typedef typename internal::Function<F>::ArgumentTuple ArgumentTuple;
+
+  MockSpec<F>& With(const Matcher<A1>& m1, const Matcher<A2>& m2,
+      const Matcher<A3>& m3, const Matcher<A4>& m4, const Matcher<A5>& m5,
+      const Matcher<A6>& m6, const Matcher<A7>& m7, const Matcher<A8>& m8,
+      const Matcher<A9>& m9) {
+    this->current_spec().SetMatchers(::testing::make_tuple(m1, m2, m3, m4, m5,
+        m6, m7, m8, m9));
+    return this->current_spec();
+  }
+
+  R Invoke(A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6, A7 a7, A8 a8, A9 a9) {
+    // Even though gcc and MSVC don't enforce it, 'this->' is required
+    // by the C++ standard [14.6.4] here, as the base class type is
+    // dependent on the template argument (and thus shouldn't be
+    // looked into when resolving InvokeWith).
+    return this->InvokeWith(ArgumentTuple(a1, a2, a3, a4, a5, a6, a7, a8, a9));
+  }
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+    typename A5, typename A6, typename A7, typename A8, typename A9,
+    typename A10>
+class FunctionMocker<R(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10)> : public
+    internal::FunctionMockerBase<R(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10)> {
+ public:
+  typedef R F(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10);
+  typedef typename internal::Function<F>::ArgumentTuple ArgumentTuple;
+
+  MockSpec<F>& With(const Matcher<A1>& m1, const Matcher<A2>& m2,
+      const Matcher<A3>& m3, const Matcher<A4>& m4, const Matcher<A5>& m5,
+      const Matcher<A6>& m6, const Matcher<A7>& m7, const Matcher<A8>& m8,
+      const Matcher<A9>& m9, const Matcher<A10>& m10) {
+    this->current_spec().SetMatchers(::testing::make_tuple(m1, m2, m3, m4, m5,
+        m6, m7, m8, m9, m10));
+    return this->current_spec();
+  }
+
+  R Invoke(A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6, A7 a7, A8 a8, A9 a9,
+      A10 a10) {
+    // Even though gcc and MSVC don't enforce it, 'this->' is required
+    // by the C++ standard [14.6.4] here, as the base class type is
+    // dependent on the template argument (and thus shouldn't be
+    // looked into when resolving InvokeWith).
+    return this->InvokeWith(ArgumentTuple(a1, a2, a3, a4, a5, a6, a7, a8, a9,
+        a10));
+  }
+};
+
+}  // namespace internal
+
+// The style guide prohibits "using" statements in a namespace scope
+// inside a header file.  However, the FunctionMocker class template
+// is meant to be defined in the ::testing namespace.  The following
+// line is just a trick for working around a bug in MSVC 8.0, which
+// cannot handle it if we define FunctionMocker in ::testing.
+using internal::FunctionMocker;
+
+// GMOCK_RESULT_(tn, F) expands to the result type of function type F.
+// We define this as a variadic macro in case F contains unprotected
+// commas (the same reason that we use variadic macros in other places
+// in this file).
+// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!!
+#define GMOCK_RESULT_(tn, ...) \
+    tn ::testing::internal::Function<__VA_ARGS__>::Result
+
+// The type of argument N of the given function type.
+// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!!
+#define GMOCK_ARG_(tn, N, ...) \
+    tn ::testing::internal::Function<__VA_ARGS__>::Argument##N
+
+// The matcher type for argument N of the given function type.
+// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!!
+#define GMOCK_MATCHER_(tn, N, ...) \
+    const ::testing::Matcher<GMOCK_ARG_(tn, N, __VA_ARGS__)>&
+
+// The variable for mocking the given method.
+// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!!
+#define GMOCK_MOCKER_(arity, constness, Method) \
+    GTEST_CONCAT_TOKEN_(gmock##constness##arity##_##Method##_, __LINE__)
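+
+// For illustration only, with the sample function type int(bool):
+//   GMOCK_RESULT_(, int(bool)) names
+//       ::testing::internal::Function<int(bool)>::Result      (i.e. int),
+//   GMOCK_ARG_(, 1, int(bool)) names
+//       ::testing::internal::Function<int(bool)>::Argument1   (i.e. bool),
+//   and GMOCK_MATCHER_(, 1, int(bool)) names const ::testing::Matcher<bool>&.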
+
+// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!!
+#define GMOCK_METHOD0_(tn, constness, ct, Method, ...) \
+  GMOCK_RESULT_(tn, __VA_ARGS__) ct Method( \
+      ) constness { \
+    GTEST_COMPILE_ASSERT_((::testing::tuple_size<                          \
+        tn ::testing::internal::Function<__VA_ARGS__>::ArgumentTuple>::value \
+            == 0), \
+        this_method_does_not_take_0_arguments); \
+    GMOCK_MOCKER_(0, constness, Method).SetOwnerAndName(this, #Method); \
+    return GMOCK_MOCKER_(0, constness, Method).Invoke(); \
+  } \
+  ::testing::MockSpec<__VA_ARGS__>& \
+      gmock_##Method() constness { \
+    GMOCK_MOCKER_(0, constness, Method).RegisterOwner(this); \
+    return GMOCK_MOCKER_(0, constness, Method).With(); \
+  } \
+  mutable ::testing::FunctionMocker<__VA_ARGS__> GMOCK_MOCKER_(0, constness, \
+      Method)
+
+// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!!
+#define GMOCK_METHOD1_(tn, constness, ct, Method, ...) \
+  GMOCK_RESULT_(tn, __VA_ARGS__) ct Method( \
+      GMOCK_ARG_(tn, 1, __VA_ARGS__) gmock_a1) constness { \
+    GTEST_COMPILE_ASSERT_((::testing::tuple_size<                          \
+        tn ::testing::internal::Function<__VA_ARGS__>::ArgumentTuple>::value \
+            == 1), \
+        this_method_does_not_take_1_argument); \
+    GMOCK_MOCKER_(1, constness, Method).SetOwnerAndName(this, #Method); \
+    return GMOCK_MOCKER_(1, constness, Method).Invoke(gmock_a1); \
+  } \
+  ::testing::MockSpec<__VA_ARGS__>& \
+      gmock_##Method(GMOCK_MATCHER_(tn, 1, __VA_ARGS__) gmock_a1) constness { \
+    GMOCK_MOCKER_(1, constness, Method).RegisterOwner(this); \
+    return GMOCK_MOCKER_(1, constness, Method).With(gmock_a1); \
+  } \
+  mutable ::testing::FunctionMocker<__VA_ARGS__> GMOCK_MOCKER_(1, constness, \
+      Method)
+
+// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!!
+#define GMOCK_METHOD2_(tn, constness, ct, Method, ...) \
+  GMOCK_RESULT_(tn, __VA_ARGS__) ct Method( \
+      GMOCK_ARG_(tn, 1, __VA_ARGS__) gmock_a1, \
+      GMOCK_ARG_(tn, 2, __VA_ARGS__) gmock_a2) constness { \
+    GTEST_COMPILE_ASSERT_((::testing::tuple_size<                          \
+        tn ::testing::internal::Function<__VA_ARGS__>::ArgumentTuple>::value \
+            == 2), \
+        this_method_does_not_take_2_arguments); \
+    GMOCK_MOCKER_(2, constness, Method).SetOwnerAndName(this, #Method); \
+    return GMOCK_MOCKER_(2, constness, Method).Invoke(gmock_a1, gmock_a2); \
+  } \
+  ::testing::MockSpec<__VA_ARGS__>& \
+      gmock_##Method(GMOCK_MATCHER_(tn, 1, __VA_ARGS__) gmock_a1, \
+                     GMOCK_MATCHER_(tn, 2, __VA_ARGS__) gmock_a2) constness { \
+    GMOCK_MOCKER_(2, constness, Method).RegisterOwner(this); \
+    return GMOCK_MOCKER_(2, constness, Method).With(gmock_a1, gmock_a2); \
+  } \
+  mutable ::testing::FunctionMocker<__VA_ARGS__> GMOCK_MOCKER_(2, constness, \
+      Method)
+
+// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!!
+#define GMOCK_METHOD3_(tn, constness, ct, Method, ...) \
+  GMOCK_RESULT_(tn, __VA_ARGS__) ct Method( \
+      GMOCK_ARG_(tn, 1, __VA_ARGS__) gmock_a1, \
+      GMOCK_ARG_(tn, 2, __VA_ARGS__) gmock_a2, \
+      GMOCK_ARG_(tn, 3, __VA_ARGS__) gmock_a3) constness { \
+    GTEST_COMPILE_ASSERT_((::testing::tuple_size<                          \
+        tn ::testing::internal::Function<__VA_ARGS__>::ArgumentTuple>::value \
+            == 3), \
+        this_method_does_not_take_3_arguments); \
+    GMOCK_MOCKER_(3, constness, Method).SetOwnerAndName(this, #Method); \
+    return GMOCK_MOCKER_(3, constness, Method).Invoke(gmock_a1, gmock_a2, \
+        gmock_a3); \
+  } \
+  ::testing::MockSpec<__VA_ARGS__>& \
+      gmock_##Method(GMOCK_MATCHER_(tn, 1, __VA_ARGS__) gmock_a1, \
+                     GMOCK_MATCHER_(tn, 2, __VA_ARGS__) gmock_a2, \
+                     GMOCK_MATCHER_(tn, 3, __VA_ARGS__) gmock_a3) constness { \
+    GMOCK_MOCKER_(3, constness, Method).RegisterOwner(this); \
+    return GMOCK_MOCKER_(3, constness, Method).With(gmock_a1, gmock_a2, \
+        gmock_a3); \
+  } \
+  mutable ::testing::FunctionMocker<__VA_ARGS__> GMOCK_MOCKER_(3, constness, \
+      Method)
+
+// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!!
+#define GMOCK_METHOD4_(tn, constness, ct, Method, ...) \
+  GMOCK_RESULT_(tn, __VA_ARGS__) ct Method( \
+      GMOCK_ARG_(tn, 1, __VA_ARGS__) gmock_a1, \
+      GMOCK_ARG_(tn, 2, __VA_ARGS__) gmock_a2, \
+      GMOCK_ARG_(tn, 3, __VA_ARGS__) gmock_a3, \
+      GMOCK_ARG_(tn, 4, __VA_ARGS__) gmock_a4) constness { \
+    GTEST_COMPILE_ASSERT_((::testing::tuple_size<                          \
+        tn ::testing::internal::Function<__VA_ARGS__>::ArgumentTuple>::value \
+            == 4), \
+        this_method_does_not_take_4_arguments); \
+    GMOCK_MOCKER_(4, constness, Method).SetOwnerAndName(this, #Method); \
+    return GMOCK_MOCKER_(4, constness, Method).Invoke(gmock_a1, gmock_a2, \
+        gmock_a3, gmock_a4); \
+  } \
+  ::testing::MockSpec<__VA_ARGS__>& \
+      gmock_##Method(GMOCK_MATCHER_(tn, 1, __VA_ARGS__) gmock_a1, \
+                     GMOCK_MATCHER_(tn, 2, __VA_ARGS__) gmock_a2, \
+                     GMOCK_MATCHER_(tn, 3, __VA_ARGS__) gmock_a3, \
+                     GMOCK_MATCHER_(tn, 4, __VA_ARGS__) gmock_a4) constness { \
+    GMOCK_MOCKER_(4, constness, Method).RegisterOwner(this); \
+    return GMOCK_MOCKER_(4, constness, Method).With(gmock_a1, gmock_a2, \
+        gmock_a3, gmock_a4); \
+  } \
+  mutable ::testing::FunctionMocker<__VA_ARGS__> GMOCK_MOCKER_(4, constness, \
+      Method)
+
+// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!!
+#define GMOCK_METHOD5_(tn, constness, ct, Method, ...) \
+  GMOCK_RESULT_(tn, __VA_ARGS__) ct Method( \
+      GMOCK_ARG_(tn, 1, __VA_ARGS__) gmock_a1, \
+      GMOCK_ARG_(tn, 2, __VA_ARGS__) gmock_a2, \
+      GMOCK_ARG_(tn, 3, __VA_ARGS__) gmock_a3, \
+      GMOCK_ARG_(tn, 4, __VA_ARGS__) gmock_a4, \
+      GMOCK_ARG_(tn, 5, __VA_ARGS__) gmock_a5) constness { \
+    GTEST_COMPILE_ASSERT_((::testing::tuple_size<                          \
+        tn ::testing::internal::Function<__VA_ARGS__>::ArgumentTuple>::value \
+            == 5), \
+        this_method_does_not_take_5_arguments); \
+    GMOCK_MOCKER_(5, constness, Method).SetOwnerAndName(this, #Method); \
+    return GMOCK_MOCKER_(5, constness, Method).Invoke(gmock_a1, gmock_a2, \
+        gmock_a3, gmock_a4, gmock_a5); \
+  } \
+  ::testing::MockSpec<__VA_ARGS__>& \
+      gmock_##Method(GMOCK_MATCHER_(tn, 1, __VA_ARGS__) gmock_a1, \
+                     GMOCK_MATCHER_(tn, 2, __VA_ARGS__) gmock_a2, \
+                     GMOCK_MATCHER_(tn, 3, __VA_ARGS__) gmock_a3, \
+                     GMOCK_MATCHER_(tn, 4, __VA_ARGS__) gmock_a4, \
+                     GMOCK_MATCHER_(tn, 5, __VA_ARGS__) gmock_a5) constness { \
+    GMOCK_MOCKER_(5, constness, Method).RegisterOwner(this); \
+    return GMOCK_MOCKER_(5, constness, Method).With(gmock_a1, gmock_a2, \
+        gmock_a3, gmock_a4, gmock_a5); \
+  } \
+  mutable ::testing::FunctionMocker<__VA_ARGS__> GMOCK_MOCKER_(5, constness, \
+      Method)
+
+// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!!
+#define GMOCK_METHOD6_(tn, constness, ct, Method, ...) \
+  GMOCK_RESULT_(tn, __VA_ARGS__) ct Method( \
+      GMOCK_ARG_(tn, 1, __VA_ARGS__) gmock_a1, \
+      GMOCK_ARG_(tn, 2, __VA_ARGS__) gmock_a2, \
+      GMOCK_ARG_(tn, 3, __VA_ARGS__) gmock_a3, \
+      GMOCK_ARG_(tn, 4, __VA_ARGS__) gmock_a4, \
+      GMOCK_ARG_(tn, 5, __VA_ARGS__) gmock_a5, \
+      GMOCK_ARG_(tn, 6, __VA_ARGS__) gmock_a6) constness { \
+    GTEST_COMPILE_ASSERT_((::testing::tuple_size<                          \
+        tn ::testing::internal::Function<__VA_ARGS__>::ArgumentTuple>::value \
+            == 6), \
+        this_method_does_not_take_6_arguments); \
+    GMOCK_MOCKER_(6, constness, Method).SetOwnerAndName(this, #Method); \
+    return GMOCK_MOCKER_(6, constness, Method).Invoke(gmock_a1, gmock_a2, \
+        gmock_a3, gmock_a4, gmock_a5, gmock_a6); \
+  } \
+  ::testing::MockSpec<__VA_ARGS__>& \
+      gmock_##Method(GMOCK_MATCHER_(tn, 1, __VA_ARGS__) gmock_a1, \
+                     GMOCK_MATCHER_(tn, 2, __VA_ARGS__) gmock_a2, \
+                     GMOCK_MATCHER_(tn, 3, __VA_ARGS__) gmock_a3, \
+                     GMOCK_MATCHER_(tn, 4, __VA_ARGS__) gmock_a4, \
+                     GMOCK_MATCHER_(tn, 5, __VA_ARGS__) gmock_a5, \
+                     GMOCK_MATCHER_(tn, 6, __VA_ARGS__) gmock_a6) constness { \
+    GMOCK_MOCKER_(6, constness, Method).RegisterOwner(this); \
+    return GMOCK_MOCKER_(6, constness, Method).With(gmock_a1, gmock_a2, \
+        gmock_a3, gmock_a4, gmock_a5, gmock_a6); \
+  } \
+  mutable ::testing::FunctionMocker<__VA_ARGS__> GMOCK_MOCKER_(6, constness, \
+      Method)
+
+// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!!
+#define GMOCK_METHOD7_(tn, constness, ct, Method, ...) \
+  GMOCK_RESULT_(tn, __VA_ARGS__) ct Method( \
+      GMOCK_ARG_(tn, 1, __VA_ARGS__) gmock_a1, \
+      GMOCK_ARG_(tn, 2, __VA_ARGS__) gmock_a2, \
+      GMOCK_ARG_(tn, 3, __VA_ARGS__) gmock_a3, \
+      GMOCK_ARG_(tn, 4, __VA_ARGS__) gmock_a4, \
+      GMOCK_ARG_(tn, 5, __VA_ARGS__) gmock_a5, \
+      GMOCK_ARG_(tn, 6, __VA_ARGS__) gmock_a6, \
+      GMOCK_ARG_(tn, 7, __VA_ARGS__) gmock_a7) constness { \
+    GTEST_COMPILE_ASSERT_((::testing::tuple_size<                          \
+        tn ::testing::internal::Function<__VA_ARGS__>::ArgumentTuple>::value \
+            == 7), \
+        this_method_does_not_take_7_arguments); \
+    GMOCK_MOCKER_(7, constness, Method).SetOwnerAndName(this, #Method); \
+    return GMOCK_MOCKER_(7, constness, Method).Invoke(gmock_a1, gmock_a2, \
+        gmock_a3, gmock_a4, gmock_a5, gmock_a6, gmock_a7); \
+  } \
+  ::testing::MockSpec<__VA_ARGS__>& \
+      gmock_##Method(GMOCK_MATCHER_(tn, 1, __VA_ARGS__) gmock_a1, \
+                     GMOCK_MATCHER_(tn, 2, __VA_ARGS__) gmock_a2, \
+                     GMOCK_MATCHER_(tn, 3, __VA_ARGS__) gmock_a3, \
+                     GMOCK_MATCHER_(tn, 4, __VA_ARGS__) gmock_a4, \
+                     GMOCK_MATCHER_(tn, 5, __VA_ARGS__) gmock_a5, \
+                     GMOCK_MATCHER_(tn, 6, __VA_ARGS__) gmock_a6, \
+                     GMOCK_MATCHER_(tn, 7, __VA_ARGS__) gmock_a7) constness { \
+    GMOCK_MOCKER_(7, constness, Method).RegisterOwner(this); \
+    return GMOCK_MOCKER_(7, constness, Method).With(gmock_a1, gmock_a2, \
+        gmock_a3, gmock_a4, gmock_a5, gmock_a6, gmock_a7); \
+  } \
+  mutable ::testing::FunctionMocker<__VA_ARGS__> GMOCK_MOCKER_(7, constness, \
+      Method)
+
+// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!!
+#define GMOCK_METHOD8_(tn, constness, ct, Method, ...) \
+  GMOCK_RESULT_(tn, __VA_ARGS__) ct Method( \
+      GMOCK_ARG_(tn, 1, __VA_ARGS__) gmock_a1, \
+      GMOCK_ARG_(tn, 2, __VA_ARGS__) gmock_a2, \
+      GMOCK_ARG_(tn, 3, __VA_ARGS__) gmock_a3, \
+      GMOCK_ARG_(tn, 4, __VA_ARGS__) gmock_a4, \
+      GMOCK_ARG_(tn, 5, __VA_ARGS__) gmock_a5, \
+      GMOCK_ARG_(tn, 6, __VA_ARGS__) gmock_a6, \
+      GMOCK_ARG_(tn, 7, __VA_ARGS__) gmock_a7, \
+      GMOCK_ARG_(tn, 8, __VA_ARGS__) gmock_a8) constness { \
+    GTEST_COMPILE_ASSERT_((::testing::tuple_size<                          \
+        tn ::testing::internal::Function<__VA_ARGS__>::ArgumentTuple>::value \
+            == 8), \
+        this_method_does_not_take_8_arguments); \
+    GMOCK_MOCKER_(8, constness, Method).SetOwnerAndName(this, #Method); \
+    return GMOCK_MOCKER_(8, constness, Method).Invoke(gmock_a1, gmock_a2, \
+        gmock_a3, gmock_a4, gmock_a5, gmock_a6, gmock_a7, gmock_a8); \
+  } \
+  ::testing::MockSpec<__VA_ARGS__>& \
+      gmock_##Method(GMOCK_MATCHER_(tn, 1, __VA_ARGS__) gmock_a1, \
+                     GMOCK_MATCHER_(tn, 2, __VA_ARGS__) gmock_a2, \
+                     GMOCK_MATCHER_(tn, 3, __VA_ARGS__) gmock_a3, \
+                     GMOCK_MATCHER_(tn, 4, __VA_ARGS__) gmock_a4, \
+                     GMOCK_MATCHER_(tn, 5, __VA_ARGS__) gmock_a5, \
+                     GMOCK_MATCHER_(tn, 6, __VA_ARGS__) gmock_a6, \
+                     GMOCK_MATCHER_(tn, 7, __VA_ARGS__) gmock_a7, \
+                     GMOCK_MATCHER_(tn, 8, __VA_ARGS__) gmock_a8) constness { \
+    GMOCK_MOCKER_(8, constness, Method).RegisterOwner(this); \
+    return GMOCK_MOCKER_(8, constness, Method).With(gmock_a1, gmock_a2, \
+        gmock_a3, gmock_a4, gmock_a5, gmock_a6, gmock_a7, gmock_a8); \
+  } \
+  mutable ::testing::FunctionMocker<__VA_ARGS__> GMOCK_MOCKER_(8, constness, \
+      Method)
+
+// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!!
+#define GMOCK_METHOD9_(tn, constness, ct, Method, ...) \
+  GMOCK_RESULT_(tn, __VA_ARGS__) ct Method( \
+      GMOCK_ARG_(tn, 1, __VA_ARGS__) gmock_a1, \
+      GMOCK_ARG_(tn, 2, __VA_ARGS__) gmock_a2, \
+      GMOCK_ARG_(tn, 3, __VA_ARGS__) gmock_a3, \
+      GMOCK_ARG_(tn, 4, __VA_ARGS__) gmock_a4, \
+      GMOCK_ARG_(tn, 5, __VA_ARGS__) gmock_a5, \
+      GMOCK_ARG_(tn, 6, __VA_ARGS__) gmock_a6, \
+      GMOCK_ARG_(tn, 7, __VA_ARGS__) gmock_a7, \
+      GMOCK_ARG_(tn, 8, __VA_ARGS__) gmock_a8, \
+      GMOCK_ARG_(tn, 9, __VA_ARGS__) gmock_a9) constness { \
+    GTEST_COMPILE_ASSERT_((::testing::tuple_size<                          \
+        tn ::testing::internal::Function<__VA_ARGS__>::ArgumentTuple>::value \
+            == 9), \
+        this_method_does_not_take_9_arguments); \
+    GMOCK_MOCKER_(9, constness, Method).SetOwnerAndName(this, #Method); \
+    return GMOCK_MOCKER_(9, constness, Method).Invoke(gmock_a1, gmock_a2, \
+        gmock_a3, gmock_a4, gmock_a5, gmock_a6, gmock_a7, gmock_a8, \
+        gmock_a9); \
+  } \
+  ::testing::MockSpec<__VA_ARGS__>& \
+      gmock_##Method(GMOCK_MATCHER_(tn, 1, __VA_ARGS__) gmock_a1, \
+                     GMOCK_MATCHER_(tn, 2, __VA_ARGS__) gmock_a2, \
+                     GMOCK_MATCHER_(tn, 3, __VA_ARGS__) gmock_a3, \
+                     GMOCK_MATCHER_(tn, 4, __VA_ARGS__) gmock_a4, \
+                     GMOCK_MATCHER_(tn, 5, __VA_ARGS__) gmock_a5, \
+                     GMOCK_MATCHER_(tn, 6, __VA_ARGS__) gmock_a6, \
+                     GMOCK_MATCHER_(tn, 7, __VA_ARGS__) gmock_a7, \
+                     GMOCK_MATCHER_(tn, 8, __VA_ARGS__) gmock_a8, \
+                     GMOCK_MATCHER_(tn, 9, __VA_ARGS__) gmock_a9) constness { \
+    GMOCK_MOCKER_(9, constness, Method).RegisterOwner(this); \
+    return GMOCK_MOCKER_(9, constness, Method).With(gmock_a1, gmock_a2, \
+        gmock_a3, gmock_a4, gmock_a5, gmock_a6, gmock_a7, gmock_a8, \
+        gmock_a9); \
+  } \
+  mutable ::testing::FunctionMocker<__VA_ARGS__> GMOCK_MOCKER_(9, constness, \
+      Method)
+
+// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!!
+#define GMOCK_METHOD10_(tn, constness, ct, Method, ...) \
+  GMOCK_RESULT_(tn, __VA_ARGS__) ct Method( \
+      GMOCK_ARG_(tn, 1, __VA_ARGS__) gmock_a1, \
+      GMOCK_ARG_(tn, 2, __VA_ARGS__) gmock_a2, \
+      GMOCK_ARG_(tn, 3, __VA_ARGS__) gmock_a3, \
+      GMOCK_ARG_(tn, 4, __VA_ARGS__) gmock_a4, \
+      GMOCK_ARG_(tn, 5, __VA_ARGS__) gmock_a5, \
+      GMOCK_ARG_(tn, 6, __VA_ARGS__) gmock_a6, \
+      GMOCK_ARG_(tn, 7, __VA_ARGS__) gmock_a7, \
+      GMOCK_ARG_(tn, 8, __VA_ARGS__) gmock_a8, \
+      GMOCK_ARG_(tn, 9, __VA_ARGS__) gmock_a9, \
+      GMOCK_ARG_(tn, 10, __VA_ARGS__) gmock_a10) constness { \
+    GTEST_COMPILE_ASSERT_((::testing::tuple_size<                          \
+        tn ::testing::internal::Function<__VA_ARGS__>::ArgumentTuple>::value \
+            == 10), \
+        this_method_does_not_take_10_arguments); \
+    GMOCK_MOCKER_(10, constness, Method).SetOwnerAndName(this, #Method); \
+    return GMOCK_MOCKER_(10, constness, Method).Invoke(gmock_a1, gmock_a2, \
+        gmock_a3, gmock_a4, gmock_a5, gmock_a6, gmock_a7, gmock_a8, gmock_a9, \
+        gmock_a10); \
+  } \
+  ::testing::MockSpec<__VA_ARGS__>& \
+      gmock_##Method(GMOCK_MATCHER_(tn, 1, __VA_ARGS__) gmock_a1, \
+                     GMOCK_MATCHER_(tn, 2, __VA_ARGS__) gmock_a2, \
+                     GMOCK_MATCHER_(tn, 3, __VA_ARGS__) gmock_a3, \
+                     GMOCK_MATCHER_(tn, 4, __VA_ARGS__) gmock_a4, \
+                     GMOCK_MATCHER_(tn, 5, __VA_ARGS__) gmock_a5, \
+                     GMOCK_MATCHER_(tn, 6, __VA_ARGS__) gmock_a6, \
+                     GMOCK_MATCHER_(tn, 7, __VA_ARGS__) gmock_a7, \
+                     GMOCK_MATCHER_(tn, 8, __VA_ARGS__) gmock_a8, \
+                     GMOCK_MATCHER_(tn, 9, __VA_ARGS__) gmock_a9, \
+                     GMOCK_MATCHER_(tn, 10, \
+                         __VA_ARGS__) gmock_a10) constness { \
+    GMOCK_MOCKER_(10, constness, Method).RegisterOwner(this); \
+    return GMOCK_MOCKER_(10, constness, Method).With(gmock_a1, gmock_a2, \
+        gmock_a3, gmock_a4, gmock_a5, gmock_a6, gmock_a7, gmock_a8, gmock_a9, \
+        gmock_a10); \
+  } \
+  mutable ::testing::FunctionMocker<__VA_ARGS__> GMOCK_MOCKER_(10, constness, \
+      Method)
+
+#define MOCK_METHOD0(m, ...) GMOCK_METHOD0_(, , , m, __VA_ARGS__)
+#define MOCK_METHOD1(m, ...) GMOCK_METHOD1_(, , , m, __VA_ARGS__)
+#define MOCK_METHOD2(m, ...) GMOCK_METHOD2_(, , , m, __VA_ARGS__)
+#define MOCK_METHOD3(m, ...) GMOCK_METHOD3_(, , , m, __VA_ARGS__)
+#define MOCK_METHOD4(m, ...) GMOCK_METHOD4_(, , , m, __VA_ARGS__)
+#define MOCK_METHOD5(m, ...) GMOCK_METHOD5_(, , , m, __VA_ARGS__)
+#define MOCK_METHOD6(m, ...) GMOCK_METHOD6_(, , , m, __VA_ARGS__)
+#define MOCK_METHOD7(m, ...) GMOCK_METHOD7_(, , , m, __VA_ARGS__)
+#define MOCK_METHOD8(m, ...) GMOCK_METHOD8_(, , , m, __VA_ARGS__)
+#define MOCK_METHOD9(m, ...) GMOCK_METHOD9_(, , , m, __VA_ARGS__)
+#define MOCK_METHOD10(m, ...) GMOCK_METHOD10_(, , , m, __VA_ARGS__)
+
+#define MOCK_CONST_METHOD0(m, ...) GMOCK_METHOD0_(, const, , m, __VA_ARGS__)
+#define MOCK_CONST_METHOD1(m, ...) GMOCK_METHOD1_(, const, , m, __VA_ARGS__)
+#define MOCK_CONST_METHOD2(m, ...) GMOCK_METHOD2_(, const, , m, __VA_ARGS__)
+#define MOCK_CONST_METHOD3(m, ...) GMOCK_METHOD3_(, const, , m, __VA_ARGS__)
+#define MOCK_CONST_METHOD4(m, ...) GMOCK_METHOD4_(, const, , m, __VA_ARGS__)
+#define MOCK_CONST_METHOD5(m, ...) GMOCK_METHOD5_(, const, , m, __VA_ARGS__)
+#define MOCK_CONST_METHOD6(m, ...) GMOCK_METHOD6_(, const, , m, __VA_ARGS__)
+#define MOCK_CONST_METHOD7(m, ...) GMOCK_METHOD7_(, const, , m, __VA_ARGS__)
+#define MOCK_CONST_METHOD8(m, ...) GMOCK_METHOD8_(, const, , m, __VA_ARGS__)
+#define MOCK_CONST_METHOD9(m, ...) GMOCK_METHOD9_(, const, , m, __VA_ARGS__)
+#define MOCK_CONST_METHOD10(m, ...) GMOCK_METHOD10_(, const, , m, __VA_ARGS__)
+
+#define MOCK_METHOD0_T(m, ...) GMOCK_METHOD0_(typename, , , m, __VA_ARGS__)
+#define MOCK_METHOD1_T(m, ...) GMOCK_METHOD1_(typename, , , m, __VA_ARGS__)
+#define MOCK_METHOD2_T(m, ...) GMOCK_METHOD2_(typename, , , m, __VA_ARGS__)
+#define MOCK_METHOD3_T(m, ...) GMOCK_METHOD3_(typename, , , m, __VA_ARGS__)
+#define MOCK_METHOD4_T(m, ...) GMOCK_METHOD4_(typename, , , m, __VA_ARGS__)
+#define MOCK_METHOD5_T(m, ...) GMOCK_METHOD5_(typename, , , m, __VA_ARGS__)
+#define MOCK_METHOD6_T(m, ...) GMOCK_METHOD6_(typename, , , m, __VA_ARGS__)
+#define MOCK_METHOD7_T(m, ...) GMOCK_METHOD7_(typename, , , m, __VA_ARGS__)
+#define MOCK_METHOD8_T(m, ...) GMOCK_METHOD8_(typename, , , m, __VA_ARGS__)
+#define MOCK_METHOD9_T(m, ...) GMOCK_METHOD9_(typename, , , m, __VA_ARGS__)
+#define MOCK_METHOD10_T(m, ...) GMOCK_METHOD10_(typename, , , m, __VA_ARGS__)
+
+#define MOCK_CONST_METHOD0_T(m, ...) \
+    GMOCK_METHOD0_(typename, const, , m, __VA_ARGS__)
+#define MOCK_CONST_METHOD1_T(m, ...) \
+    GMOCK_METHOD1_(typename, const, , m, __VA_ARGS__)
+#define MOCK_CONST_METHOD2_T(m, ...) \
+    GMOCK_METHOD2_(typename, const, , m, __VA_ARGS__)
+#define MOCK_CONST_METHOD3_T(m, ...) \
+    GMOCK_METHOD3_(typename, const, , m, __VA_ARGS__)
+#define MOCK_CONST_METHOD4_T(m, ...) \
+    GMOCK_METHOD4_(typename, const, , m, __VA_ARGS__)
+#define MOCK_CONST_METHOD5_T(m, ...) \
+    GMOCK_METHOD5_(typename, const, , m, __VA_ARGS__)
+#define MOCK_CONST_METHOD6_T(m, ...) \
+    GMOCK_METHOD6_(typename, const, , m, __VA_ARGS__)
+#define MOCK_CONST_METHOD7_T(m, ...) \
+    GMOCK_METHOD7_(typename, const, , m, __VA_ARGS__)
+#define MOCK_CONST_METHOD8_T(m, ...) \
+    GMOCK_METHOD8_(typename, const, , m, __VA_ARGS__)
+#define MOCK_CONST_METHOD9_T(m, ...) \
+    GMOCK_METHOD9_(typename, const, , m, __VA_ARGS__)
+#define MOCK_CONST_METHOD10_T(m, ...) \
+    GMOCK_METHOD10_(typename, const, , m, __VA_ARGS__)
+
+#define MOCK_METHOD0_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD0_(, , ct, m, __VA_ARGS__)
+#define MOCK_METHOD1_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD1_(, , ct, m, __VA_ARGS__)
+#define MOCK_METHOD2_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD2_(, , ct, m, __VA_ARGS__)
+#define MOCK_METHOD3_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD3_(, , ct, m, __VA_ARGS__)
+#define MOCK_METHOD4_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD4_(, , ct, m, __VA_ARGS__)
+#define MOCK_METHOD5_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD5_(, , ct, m, __VA_ARGS__)
+#define MOCK_METHOD6_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD6_(, , ct, m, __VA_ARGS__)
+#define MOCK_METHOD7_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD7_(, , ct, m, __VA_ARGS__)
+#define MOCK_METHOD8_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD8_(, , ct, m, __VA_ARGS__)
+#define MOCK_METHOD9_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD9_(, , ct, m, __VA_ARGS__)
+#define MOCK_METHOD10_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD10_(, , ct, m, __VA_ARGS__)
+
+#define MOCK_CONST_METHOD0_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD0_(, const, ct, m, __VA_ARGS__)
+#define MOCK_CONST_METHOD1_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD1_(, const, ct, m, __VA_ARGS__)
+#define MOCK_CONST_METHOD2_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD2_(, const, ct, m, __VA_ARGS__)
+#define MOCK_CONST_METHOD3_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD3_(, const, ct, m, __VA_ARGS__)
+#define MOCK_CONST_METHOD4_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD4_(, const, ct, m, __VA_ARGS__)
+#define MOCK_CONST_METHOD5_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD5_(, const, ct, m, __VA_ARGS__)
+#define MOCK_CONST_METHOD6_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD6_(, const, ct, m, __VA_ARGS__)
+#define MOCK_CONST_METHOD7_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD7_(, const, ct, m, __VA_ARGS__)
+#define MOCK_CONST_METHOD8_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD8_(, const, ct, m, __VA_ARGS__)
+#define MOCK_CONST_METHOD9_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD9_(, const, ct, m, __VA_ARGS__)
+#define MOCK_CONST_METHOD10_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD10_(, const, ct, m, __VA_ARGS__)
+
+#define MOCK_METHOD0_T_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD0_(typename, , ct, m, __VA_ARGS__)
+#define MOCK_METHOD1_T_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD1_(typename, , ct, m, __VA_ARGS__)
+#define MOCK_METHOD2_T_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD2_(typename, , ct, m, __VA_ARGS__)
+#define MOCK_METHOD3_T_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD3_(typename, , ct, m, __VA_ARGS__)
+#define MOCK_METHOD4_T_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD4_(typename, , ct, m, __VA_ARGS__)
+#define MOCK_METHOD5_T_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD5_(typename, , ct, m, __VA_ARGS__)
+#define MOCK_METHOD6_T_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD6_(typename, , ct, m, __VA_ARGS__)
+#define MOCK_METHOD7_T_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD7_(typename, , ct, m, __VA_ARGS__)
+#define MOCK_METHOD8_T_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD8_(typename, , ct, m, __VA_ARGS__)
+#define MOCK_METHOD9_T_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD9_(typename, , ct, m, __VA_ARGS__)
+#define MOCK_METHOD10_T_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD10_(typename, , ct, m, __VA_ARGS__)
+
+#define MOCK_CONST_METHOD0_T_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD0_(typename, const, ct, m, __VA_ARGS__)
+#define MOCK_CONST_METHOD1_T_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD1_(typename, const, ct, m, __VA_ARGS__)
+#define MOCK_CONST_METHOD2_T_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD2_(typename, const, ct, m, __VA_ARGS__)
+#define MOCK_CONST_METHOD3_T_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD3_(typename, const, ct, m, __VA_ARGS__)
+#define MOCK_CONST_METHOD4_T_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD4_(typename, const, ct, m, __VA_ARGS__)
+#define MOCK_CONST_METHOD5_T_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD5_(typename, const, ct, m, __VA_ARGS__)
+#define MOCK_CONST_METHOD6_T_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD6_(typename, const, ct, m, __VA_ARGS__)
+#define MOCK_CONST_METHOD7_T_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD7_(typename, const, ct, m, __VA_ARGS__)
+#define MOCK_CONST_METHOD8_T_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD8_(typename, const, ct, m, __VA_ARGS__)
+#define MOCK_CONST_METHOD9_T_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD9_(typename, const, ct, m, __VA_ARGS__)
+#define MOCK_CONST_METHOD10_T_WITH_CALLTYPE(ct, m, ...) \
+    GMOCK_METHOD10_(typename, const, ct, m, __VA_ARGS__)
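+
+// A minimal usage sketch of the macros above (Turtle, MockTurtle, PenDown,
+// GetX, and GoTo are hypothetical names, not part of Google Mock):
+//
+//   class MockTurtle : public Turtle {
+//    public:
+//     MOCK_METHOD0(PenDown, void());
+//     MOCK_CONST_METHOD0(GetX, int());
+//     MOCK_METHOD2(GoTo, void(int x, int y));
+//   };
+//
+// The *_T variants are for mock methods declared inside class templates,
+// and the *_WITH_CALLTYPE variants additionally take a calling convention
+// (e.g. STDMETHODCALLTYPE on Windows).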
+
+// A MockFunction<F> class has one mock method whose type is F.  It is
+// useful when you just want your test code to emit some messages and
+// have Google Mock verify the right messages are sent (and perhaps at
+// the right times).  For example, if you are exercising code:
+//
+//   Foo(1);
+//   Foo(2);
+//   Foo(3);
+//
+// and want to verify that Foo(1) and Foo(3) both invoke
+// mock.Bar("a"), but Foo(2) doesn't invoke anything, you can write:
+//
+// TEST(FooTest, InvokesBarCorrectly) {
+//   MyMock mock;
+//   MockFunction<void(string check_point_name)> check;
+//   {
+//     InSequence s;
+//
+//     EXPECT_CALL(mock, Bar("a"));
+//     EXPECT_CALL(check, Call("1"));
+//     EXPECT_CALL(check, Call("2"));
+//     EXPECT_CALL(mock, Bar("a"));
+//   }
+//   Foo(1);
+//   check.Call("1");
+//   Foo(2);
+//   check.Call("2");
+//   Foo(3);
+// }
+//
+// The expectation spec says that the first Bar("a") must happen
+// before check point "1", the second Bar("a") must happen after check
+// point "2", and nothing should happen between the two check
+// points. The explicit check points make it easy to tell which
+// Bar("a") is called by which call to Foo().
+//
+// MockFunction<F> can also be used to exercise code that accepts
+// std::function<F> callbacks. To do so, use the AsStdFunction() method to
+// create a std::function proxy that forwards to the original object's Call.
+// Example:
+//
+// TEST(FooTest, RunsCallbackWithBarArgument) {
+//   MockFunction<int(string)> callback;
+//   EXPECT_CALL(callback, Call("bar")).WillOnce(Return(1));
+//   Foo(callback.AsStdFunction());
+// }
+template <typename F>
+class MockFunction;
+
+template <typename R>
+class MockFunction<R()> {
+ public:
+  MockFunction() {}
+
+  MOCK_METHOD0_T(Call, R());
+
+#if GTEST_HAS_STD_FUNCTION_
+  std::function<R()> AsStdFunction() {
+    return [this]() -> R {
+      return this->Call();
+    };
+  }
+#endif  // GTEST_HAS_STD_FUNCTION_
+
+ private:
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFunction);
+};
+
+template <typename R, typename A0>
+class MockFunction<R(A0)> {
+ public:
+  MockFunction() {}
+
+  MOCK_METHOD1_T(Call, R(A0));
+
+#if GTEST_HAS_STD_FUNCTION_
+  std::function<R(A0)> AsStdFunction() {
+    return [this](A0 a0) -> R {
+      return this->Call(a0);
+    };
+  }
+#endif  // GTEST_HAS_STD_FUNCTION_
+
+ private:
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFunction);
+};
+
+template <typename R, typename A0, typename A1>
+class MockFunction<R(A0, A1)> {
+ public:
+  MockFunction() {}
+
+  MOCK_METHOD2_T(Call, R(A0, A1));
+
+#if GTEST_HAS_STD_FUNCTION_
+  std::function<R(A0, A1)> AsStdFunction() {
+    return [this](A0 a0, A1 a1) -> R {
+      return this->Call(a0, a1);
+    };
+  }
+#endif  // GTEST_HAS_STD_FUNCTION_
+
+ private:
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFunction);
+};
+
+template <typename R, typename A0, typename A1, typename A2>
+class MockFunction<R(A0, A1, A2)> {
+ public:
+  MockFunction() {}
+
+  MOCK_METHOD3_T(Call, R(A0, A1, A2));
+
+#if GTEST_HAS_STD_FUNCTION_
+  std::function<R(A0, A1, A2)> AsStdFunction() {
+    return [this](A0 a0, A1 a1, A2 a2) -> R {
+      return this->Call(a0, a1, a2);
+    };
+  }
+#endif  // GTEST_HAS_STD_FUNCTION_
+
+ private:
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFunction);
+};
+
+template <typename R, typename A0, typename A1, typename A2, typename A3>
+class MockFunction<R(A0, A1, A2, A3)> {
+ public:
+  MockFunction() {}
+
+  MOCK_METHOD4_T(Call, R(A0, A1, A2, A3));
+
+#if GTEST_HAS_STD_FUNCTION_
+  std::function<R(A0, A1, A2, A3)> AsStdFunction() {
+    return [this](A0 a0, A1 a1, A2 a2, A3 a3) -> R {
+      return this->Call(a0, a1, a2, a3);
+    };
+  }
+#endif  // GTEST_HAS_STD_FUNCTION_
+
+ private:
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFunction);
+};
+
+template <typename R, typename A0, typename A1, typename A2, typename A3,
+    typename A4>
+class MockFunction<R(A0, A1, A2, A3, A4)> {
+ public:
+  MockFunction() {}
+
+  MOCK_METHOD5_T(Call, R(A0, A1, A2, A3, A4));
+
+#if GTEST_HAS_STD_FUNCTION_
+  std::function<R(A0, A1, A2, A3, A4)> AsStdFunction() {
+    return [this](A0 a0, A1 a1, A2 a2, A3 a3, A4 a4) -> R {
+      return this->Call(a0, a1, a2, a3, a4);
+    };
+  }
+#endif  // GTEST_HAS_STD_FUNCTION_
+
+ private:
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFunction);
+};
+
+template <typename R, typename A0, typename A1, typename A2, typename A3,
+    typename A4, typename A5>
+class MockFunction<R(A0, A1, A2, A3, A4, A5)> {
+ public:
+  MockFunction() {}
+
+  MOCK_METHOD6_T(Call, R(A0, A1, A2, A3, A4, A5));
+
+#if GTEST_HAS_STD_FUNCTION_
+  std::function<R(A0, A1, A2, A3, A4, A5)> AsStdFunction() {
+    return [this](A0 a0, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5) -> R {
+      return this->Call(a0, a1, a2, a3, a4, a5);
+    };
+  }
+#endif  // GTEST_HAS_STD_FUNCTION_
+
+ private:
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFunction);
+};
+
+template <typename R, typename A0, typename A1, typename A2, typename A3,
+    typename A4, typename A5, typename A6>
+class MockFunction<R(A0, A1, A2, A3, A4, A5, A6)> {
+ public:
+  MockFunction() {}
+
+  MOCK_METHOD7_T(Call, R(A0, A1, A2, A3, A4, A5, A6));
+
+#if GTEST_HAS_STD_FUNCTION_
+  std::function<R(A0, A1, A2, A3, A4, A5, A6)> AsStdFunction() {
+    return [this](A0 a0, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6) -> R {
+      return this->Call(a0, a1, a2, a3, a4, a5, a6);
+    };
+  }
+#endif  // GTEST_HAS_STD_FUNCTION_
+
+ private:
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFunction);
+};
+
+template <typename R, typename A0, typename A1, typename A2, typename A3,
+    typename A4, typename A5, typename A6, typename A7>
+class MockFunction<R(A0, A1, A2, A3, A4, A5, A6, A7)> {
+ public:
+  MockFunction() {}
+
+  MOCK_METHOD8_T(Call, R(A0, A1, A2, A3, A4, A5, A6, A7));
+
+#if GTEST_HAS_STD_FUNCTION_
+  std::function<R(A0, A1, A2, A3, A4, A5, A6, A7)> AsStdFunction() {
+    return [this](A0 a0, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6, A7 a7) -> R {
+      return this->Call(a0, a1, a2, a3, a4, a5, a6, a7);
+    };
+  }
+#endif  // GTEST_HAS_STD_FUNCTION_
+
+ private:
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFunction);
+};
+
+template <typename R, typename A0, typename A1, typename A2, typename A3,
+    typename A4, typename A5, typename A6, typename A7, typename A8>
+class MockFunction<R(A0, A1, A2, A3, A4, A5, A6, A7, A8)> {
+ public:
+  MockFunction() {}
+
+  MOCK_METHOD9_T(Call, R(A0, A1, A2, A3, A4, A5, A6, A7, A8));
+
+#if GTEST_HAS_STD_FUNCTION_
+  std::function<R(A0, A1, A2, A3, A4, A5, A6, A7, A8)> AsStdFunction() {
+    return [this](A0 a0, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6, A7 a7,
+        A8 a8) -> R {
+      return this->Call(a0, a1, a2, a3, a4, a5, a6, a7, a8);
+    };
+  }
+#endif  // GTEST_HAS_STD_FUNCTION_
+
+ private:
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFunction);
+};
+
+template <typename R, typename A0, typename A1, typename A2, typename A3,
+    typename A4, typename A5, typename A6, typename A7, typename A8,
+    typename A9>
+class MockFunction<R(A0, A1, A2, A3, A4, A5, A6, A7, A8, A9)> {
+ public:
+  MockFunction() {}
+
+  MOCK_METHOD10_T(Call, R(A0, A1, A2, A3, A4, A5, A6, A7, A8, A9));
+
+#if GTEST_HAS_STD_FUNCTION_
+  std::function<R(A0, A1, A2, A3, A4, A5, A6, A7, A8, A9)> AsStdFunction() {
+    return [this](A0 a0, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6, A7 a7,
+        A8 a8, A9 a9) -> R {
+      return this->Call(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9);
+    };
+  }
+#endif  // GTEST_HAS_STD_FUNCTION_
+
+ private:
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFunction);
+};
+
+}  // namespace testing
+
+#endif  // GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_FUNCTION_MOCKERS_H_
+// This file was GENERATED by command:
+//     pump.py gmock-generated-nice-strict.h.pump
+// DO NOT EDIT BY HAND!!!
+
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Implements class templates NiceMock, NaggyMock, and StrictMock.
+//
+// Given a mock class MockFoo that is created using Google Mock,
+// NiceMock<MockFoo> is a subclass of MockFoo that allows
+// uninteresting calls (i.e. calls to mock methods that have no
+// EXPECT_CALL specs), NaggyMock<MockFoo> is a subclass of MockFoo
+// that prints a warning when an uninteresting call occurs, and
+// StrictMock<MockFoo> is a subclass of MockFoo that treats all
+// uninteresting calls as errors.
+//
+// Currently a mock is naggy by default, so MockFoo and
+// NaggyMock<MockFoo> behave the same.  However, we will soon
+// switch the default behavior of mocks to be nice, as that in general
+// leads to more maintainable tests.  When that happens, MockFoo will
+// stop behaving like NaggyMock<MockFoo> and start behaving like
+// NiceMock<MockFoo>.
+//
+// NiceMock, NaggyMock, and StrictMock "inherit" the constructors of
+// their respective base class, with up to 10 arguments.  Therefore
+// you can write NiceMock<MockFoo>(5, "a") to construct a nice mock
+// where MockFoo has a constructor that accepts (int, const char*),
+// for example.
+//
+// A known limitation is that NiceMock<MockFoo>, NaggyMock<MockFoo>,
+// and StrictMock<MockFoo> only work for mock methods defined using
+// the MOCK_METHOD* family of macros DIRECTLY in the MockFoo class.
+// If a mock method is defined in a base class of MockFoo, the "nice"
+// or "strict" modifier may not affect it, depending on the compiler.
+// In particular, nesting NiceMock, NaggyMock, and StrictMock is NOT
+// supported.
+//
+// Another known limitation is that the constructors of the base mock
+// cannot have arguments passed by non-const reference, which are
+// banned by the Google C++ style guide anyway.
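+//
+// A minimal usage sketch (MockFoo and DoThis are hypothetical names):
+//
+//   NiceMock<MockFoo> nice_foo;      // allows uninteresting calls
+//   NaggyMock<MockFoo> naggy_foo;    // warns on uninteresting calls
+//   StrictMock<MockFoo> strict_foo;  // fails the test on uninteresting calls
+//
+//   EXPECT_CALL(nice_foo, DoThis());  // expectations work as usual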
+
+#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_NICE_STRICT_H_
+#define GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_NICE_STRICT_H_
+
+
+namespace testing {
+
+template <class MockClass>
+class NiceMock : public MockClass {
+ public:
+  // We don't factor out the constructor body to a common method, as
+  // we have to avoid a possible clash with members of MockClass.
+  NiceMock() {
+    ::testing::Mock::AllowUninterestingCalls(
+        internal::ImplicitCast_<MockClass*>(this));
+  }
+
+  // C++ doesn't (yet) allow inheritance of constructors, so we have
+  // to define it for each arity.
+  template <typename A1>
+  explicit NiceMock(const A1& a1) : MockClass(a1) {
+    ::testing::Mock::AllowUninterestingCalls(
+        internal::ImplicitCast_<MockClass*>(this));
+  }
+  template <typename A1, typename A2>
+  NiceMock(const A1& a1, const A2& a2) : MockClass(a1, a2) {
+    ::testing::Mock::AllowUninterestingCalls(
+        internal::ImplicitCast_<MockClass*>(this));
+  }
+
+  template <typename A1, typename A2, typename A3>
+  NiceMock(const A1& a1, const A2& a2, const A3& a3) : MockClass(a1, a2, a3) {
+    ::testing::Mock::AllowUninterestingCalls(
+        internal::ImplicitCast_<MockClass*>(this));
+  }
+
+  template <typename A1, typename A2, typename A3, typename A4>
+  NiceMock(const A1& a1, const A2& a2, const A3& a3,
+      const A4& a4) : MockClass(a1, a2, a3, a4) {
+    ::testing::Mock::AllowUninterestingCalls(
+        internal::ImplicitCast_<MockClass*>(this));
+  }
+
+  template <typename A1, typename A2, typename A3, typename A4, typename A5>
+  NiceMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4,
+      const A5& a5) : MockClass(a1, a2, a3, a4, a5) {
+    ::testing::Mock::AllowUninterestingCalls(
+        internal::ImplicitCast_<MockClass*>(this));
+  }
+
+  template <typename A1, typename A2, typename A3, typename A4, typename A5,
+      typename A6>
+  NiceMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4,
+      const A5& a5, const A6& a6) : MockClass(a1, a2, a3, a4, a5, a6) {
+    ::testing::Mock::AllowUninterestingCalls(
+        internal::ImplicitCast_<MockClass*>(this));
+  }
+
+  template <typename A1, typename A2, typename A3, typename A4, typename A5,
+      typename A6, typename A7>
+  NiceMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4,
+      const A5& a5, const A6& a6, const A7& a7) : MockClass(a1, a2, a3, a4, a5,
+      a6, a7) {
+    ::testing::Mock::AllowUninterestingCalls(
+        internal::ImplicitCast_<MockClass*>(this));
+  }
+
+  template <typename A1, typename A2, typename A3, typename A4, typename A5,
+      typename A6, typename A7, typename A8>
+  NiceMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4,
+      const A5& a5, const A6& a6, const A7& a7, const A8& a8) : MockClass(a1,
+      a2, a3, a4, a5, a6, a7, a8) {
+    ::testing::Mock::AllowUninterestingCalls(
+        internal::ImplicitCast_<MockClass*>(this));
+  }
+
+  template <typename A1, typename A2, typename A3, typename A4, typename A5,
+      typename A6, typename A7, typename A8, typename A9>
+  NiceMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4,
+      const A5& a5, const A6& a6, const A7& a7, const A8& a8,
+      const A9& a9) : MockClass(a1, a2, a3, a4, a5, a6, a7, a8, a9) {
+    ::testing::Mock::AllowUninterestingCalls(
+        internal::ImplicitCast_<MockClass*>(this));
+  }
+
+  template <typename A1, typename A2, typename A3, typename A4, typename A5,
+      typename A6, typename A7, typename A8, typename A9, typename A10>
+  NiceMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4,
+      const A5& a5, const A6& a6, const A7& a7, const A8& a8, const A9& a9,
+      const A10& a10) : MockClass(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) {
+    ::testing::Mock::AllowUninterestingCalls(
+        internal::ImplicitCast_<MockClass*>(this));
+  }
+
+  virtual ~NiceMock() {
+    ::testing::Mock::UnregisterCallReaction(
+        internal::ImplicitCast_<MockClass*>(this));
+  }
+
+ private:
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(NiceMock);
+};
+
+template <class MockClass>
+class NaggyMock : public MockClass {
+ public:
+  // We don't factor out the constructor body to a common method, as
+  // we have to avoid a possible clash with members of MockClass.
+  NaggyMock() {
+    ::testing::Mock::WarnUninterestingCalls(
+        internal::ImplicitCast_<MockClass*>(this));
+  }
+
+  // C++ doesn't (yet) allow inheritance of constructors, so we have
+  // to define it for each arity.
+  template <typename A1>
+  explicit NaggyMock(const A1& a1) : MockClass(a1) {
+    ::testing::Mock::WarnUninterestingCalls(
+        internal::ImplicitCast_<MockClass*>(this));
+  }
+  template <typename A1, typename A2>
+  NaggyMock(const A1& a1, const A2& a2) : MockClass(a1, a2) {
+    ::testing::Mock::WarnUninterestingCalls(
+        internal::ImplicitCast_<MockClass*>(this));
+  }
+
+  template <typename A1, typename A2, typename A3>
+  NaggyMock(const A1& a1, const A2& a2, const A3& a3) : MockClass(a1, a2, a3) {
+    ::testing::Mock::WarnUninterestingCalls(
+        internal::ImplicitCast_<MockClass*>(this));
+  }
+
+  template <typename A1, typename A2, typename A3, typename A4>
+  NaggyMock(const A1& a1, const A2& a2, const A3& a3,
+      const A4& a4) : MockClass(a1, a2, a3, a4) {
+    ::testing::Mock::WarnUninterestingCalls(
+        internal::ImplicitCast_<MockClass*>(this));
+  }
+
+  template <typename A1, typename A2, typename A3, typename A4, typename A5>
+  NaggyMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4,
+      const A5& a5) : MockClass(a1, a2, a3, a4, a5) {
+    ::testing::Mock::WarnUninterestingCalls(
+        internal::ImplicitCast_<MockClass*>(this));
+  }
+
+  template <typename A1, typename A2, typename A3, typename A4, typename A5,
+      typename A6>
+  NaggyMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4,
+      const A5& a5, const A6& a6) : MockClass(a1, a2, a3, a4, a5, a6) {
+    ::testing::Mock::WarnUninterestingCalls(
+        internal::ImplicitCast_<MockClass*>(this));
+  }
+
+  template <typename A1, typename A2, typename A3, typename A4, typename A5,
+      typename A6, typename A7>
+  NaggyMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4,
+      const A5& a5, const A6& a6, const A7& a7) : MockClass(a1, a2, a3, a4, a5,
+      a6, a7) {
+    ::testing::Mock::WarnUninterestingCalls(
+        internal::ImplicitCast_<MockClass*>(this));
+  }
+
+  template <typename A1, typename A2, typename A3, typename A4, typename A5,
+      typename A6, typename A7, typename A8>
+  NaggyMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4,
+      const A5& a5, const A6& a6, const A7& a7, const A8& a8) : MockClass(a1,
+      a2, a3, a4, a5, a6, a7, a8) {
+    ::testing::Mock::WarnUninterestingCalls(
+        internal::ImplicitCast_<MockClass*>(this));
+  }
+
+  template <typename A1, typename A2, typename A3, typename A4, typename A5,
+      typename A6, typename A7, typename A8, typename A9>
+  NaggyMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4,
+      const A5& a5, const A6& a6, const A7& a7, const A8& a8,
+      const A9& a9) : MockClass(a1, a2, a3, a4, a5, a6, a7, a8, a9) {
+    ::testing::Mock::WarnUninterestingCalls(
+        internal::ImplicitCast_<MockClass*>(this));
+  }
+
+  template <typename A1, typename A2, typename A3, typename A4, typename A5,
+      typename A6, typename A7, typename A8, typename A9, typename A10>
+  NaggyMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4,
+      const A5& a5, const A6& a6, const A7& a7, const A8& a8, const A9& a9,
+      const A10& a10) : MockClass(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) {
+    ::testing::Mock::WarnUninterestingCalls(
+        internal::ImplicitCast_<MockClass*>(this));
+  }
+
+  virtual ~NaggyMock() {
+    ::testing::Mock::UnregisterCallReaction(
+        internal::ImplicitCast_<MockClass*>(this));
+  }
+
+ private:
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(NaggyMock);
+};
+
+template <class MockClass>
+class StrictMock : public MockClass {
+ public:
+  // We don't factor out the constructor body to a common method, as
+  // we have to avoid a possible clash with members of MockClass.
+  StrictMock() {
+    ::testing::Mock::FailUninterestingCalls(
+        internal::ImplicitCast_<MockClass*>(this));
+  }
+
+  // C++ doesn't (yet) allow inheritance of constructors, so we have
+  // to define it for each arity.
+  template <typename A1>
+  explicit StrictMock(const A1& a1) : MockClass(a1) {
+    ::testing::Mock::FailUninterestingCalls(
+        internal::ImplicitCast_<MockClass*>(this));
+  }
+  template <typename A1, typename A2>
+  StrictMock(const A1& a1, const A2& a2) : MockClass(a1, a2) {
+    ::testing::Mock::FailUninterestingCalls(
+        internal::ImplicitCast_<MockClass*>(this));
+  }
+
+  template <typename A1, typename A2, typename A3>
+  StrictMock(const A1& a1, const A2& a2, const A3& a3) : MockClass(a1, a2, a3) {
+    ::testing::Mock::FailUninterestingCalls(
+        internal::ImplicitCast_<MockClass*>(this));
+  }
+
+  template <typename A1, typename A2, typename A3, typename A4>
+  StrictMock(const A1& a1, const A2& a2, const A3& a3,
+      const A4& a4) : MockClass(a1, a2, a3, a4) {
+    ::testing::Mock::FailUninterestingCalls(
+        internal::ImplicitCast_<MockClass*>(this));
+  }
+
+  template <typename A1, typename A2, typename A3, typename A4, typename A5>
+  StrictMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4,
+      const A5& a5) : MockClass(a1, a2, a3, a4, a5) {
+    ::testing::Mock::FailUninterestingCalls(
+        internal::ImplicitCast_<MockClass*>(this));
+  }
+
+  template <typename A1, typename A2, typename A3, typename A4, typename A5,
+      typename A6>
+  StrictMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4,
+      const A5& a5, const A6& a6) : MockClass(a1, a2, a3, a4, a5, a6) {
+    ::testing::Mock::FailUninterestingCalls(
+        internal::ImplicitCast_<MockClass*>(this));
+  }
+
+  template <typename A1, typename A2, typename A3, typename A4, typename A5,
+      typename A6, typename A7>
+  StrictMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4,
+      const A5& a5, const A6& a6, const A7& a7) : MockClass(a1, a2, a3, a4, a5,
+      a6, a7) {
+    ::testing::Mock::FailUninterestingCalls(
+        internal::ImplicitCast_<MockClass*>(this));
+  }
+
+  template <typename A1, typename A2, typename A3, typename A4, typename A5,
+      typename A6, typename A7, typename A8>
+  StrictMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4,
+      const A5& a5, const A6& a6, const A7& a7, const A8& a8) : MockClass(a1,
+      a2, a3, a4, a5, a6, a7, a8) {
+    ::testing::Mock::FailUninterestingCalls(
+        internal::ImplicitCast_<MockClass*>(this));
+  }
+
+  template <typename A1, typename A2, typename A3, typename A4, typename A5,
+      typename A6, typename A7, typename A8, typename A9>
+  StrictMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4,
+      const A5& a5, const A6& a6, const A7& a7, const A8& a8,
+      const A9& a9) : MockClass(a1, a2, a3, a4, a5, a6, a7, a8, a9) {
+    ::testing::Mock::FailUninterestingCalls(
+        internal::ImplicitCast_<MockClass*>(this));
+  }
+
+  template <typename A1, typename A2, typename A3, typename A4, typename A5,
+      typename A6, typename A7, typename A8, typename A9, typename A10>
+  StrictMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4,
+      const A5& a5, const A6& a6, const A7& a7, const A8& a8, const A9& a9,
+      const A10& a10) : MockClass(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) {
+    ::testing::Mock::FailUninterestingCalls(
+        internal::ImplicitCast_<MockClass*>(this));
+  }
+
+  virtual ~StrictMock() {
+    ::testing::Mock::UnregisterCallReaction(
+        internal::ImplicitCast_<MockClass*>(this));
+  }
+
+ private:
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(StrictMock);
+};
+
+// The following specializations catch some of the more common user
+// errors of nesting nice and strict mocks.  They do NOT catch all
+// possible errors.
+
+// These specializations are declared but not defined, as NiceMock,
+// NaggyMock, and StrictMock cannot be nested.
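+//
+// For example (an illustrative sketch; MockFoo is hypothetical), the
+// following does not compile, because NiceMock<StrictMock<MockFoo> > is
+// declared but never defined:
+//
+//   NiceMock<StrictMock<MockFoo> > doubly_wrapped;  // error: incomplete type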
+
+template <typename MockClass>
+class NiceMock<NiceMock<MockClass> >;
+template <typename MockClass>
+class NiceMock<NaggyMock<MockClass> >;
+template <typename MockClass>
+class NiceMock<StrictMock<MockClass> >;
+
+template <typename MockClass>
+class NaggyMock<NiceMock<MockClass> >;
+template <typename MockClass>
+class NaggyMock<NaggyMock<MockClass> >;
+template <typename MockClass>
+class NaggyMock<StrictMock<MockClass> >;
+
+template <typename MockClass>
+class StrictMock<NiceMock<MockClass> >;
+template <typename MockClass>
+class StrictMock<NaggyMock<MockClass> >;
+template <typename MockClass>
+class StrictMock<StrictMock<MockClass> >;
+
+}  // namespace testing
+
+#endif  // GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_NICE_STRICT_H_
+// This file was GENERATED by command:
+//     pump.py gmock-generated-matchers.h.pump
+// DO NOT EDIT BY HAND!!!
+
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Google Mock - a framework for writing C++ mock classes.
+//
+// This file implements some commonly used variadic matchers.
+
+#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_MATCHERS_H_
+#define GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_MATCHERS_H_
+
+#include <iterator>
+#include <sstream>
+#include <string>
+#include <vector>
+
+namespace testing {
+namespace internal {
+
+// The type of the i-th (0-based) field of Tuple.
+#define GMOCK_FIELD_TYPE_(Tuple, i) \
+    typename ::testing::tuple_element<i, Tuple>::type
+
+// TupleFields<Tuple, k0, ..., kn> is for selecting fields from a
+// tuple of type Tuple.  It has two members:
+//
+//   type: a tuple type whose i-th field is the ki-th field of Tuple.
+//   GetSelectedFields(t): returns fields k0, ..., and kn of t as a tuple.
+//
+// For example, in class TupleFields<tuple<bool, char, int>, 2, 0>, we have:
+//
+//   type is tuple<int, bool>, and
+//   GetSelectedFields(make_tuple(true, 'a', 42)) is (42, true).
+
+template <class Tuple, int k0 = -1, int k1 = -1, int k2 = -1, int k3 = -1,
+    int k4 = -1, int k5 = -1, int k6 = -1, int k7 = -1, int k8 = -1,
+    int k9 = -1>
+class TupleFields;
+
+// This generic version is used when there are 10 selectors.
+template <class Tuple, int k0, int k1, int k2, int k3, int k4, int k5, int k6,
+    int k7, int k8, int k9>
+class TupleFields {
+ public:
+  typedef ::testing::tuple<GMOCK_FIELD_TYPE_(Tuple, k0),
+      GMOCK_FIELD_TYPE_(Tuple, k1), GMOCK_FIELD_TYPE_(Tuple, k2),
+      GMOCK_FIELD_TYPE_(Tuple, k3), GMOCK_FIELD_TYPE_(Tuple, k4),
+      GMOCK_FIELD_TYPE_(Tuple, k5), GMOCK_FIELD_TYPE_(Tuple, k6),
+      GMOCK_FIELD_TYPE_(Tuple, k7), GMOCK_FIELD_TYPE_(Tuple, k8),
+      GMOCK_FIELD_TYPE_(Tuple, k9)> type;
+  static type GetSelectedFields(const Tuple& t) {
+    return type(get<k0>(t), get<k1>(t), get<k2>(t), get<k3>(t), get<k4>(t),
+        get<k5>(t), get<k6>(t), get<k7>(t), get<k8>(t), get<k9>(t));
+  }
+};
+
+// The following specializations are used for 0 ~ 9 selectors.
+
+template <class Tuple>
+class TupleFields<Tuple, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1> {
+ public:
+  typedef ::testing::tuple<> type;
+  static type GetSelectedFields(const Tuple& /* t */) {
+    return type();
+  }
+};
+
+template <class Tuple, int k0>
+class TupleFields<Tuple, k0, -1, -1, -1, -1, -1, -1, -1, -1, -1> {
+ public:
+  typedef ::testing::tuple<GMOCK_FIELD_TYPE_(Tuple, k0)> type;
+  static type GetSelectedFields(const Tuple& t) {
+    return type(get<k0>(t));
+  }
+};
+
+template <class Tuple, int k0, int k1>
+class TupleFields<Tuple, k0, k1, -1, -1, -1, -1, -1, -1, -1, -1> {
+ public:
+  typedef ::testing::tuple<GMOCK_FIELD_TYPE_(Tuple, k0),
+      GMOCK_FIELD_TYPE_(Tuple, k1)> type;
+  static type GetSelectedFields(const Tuple& t) {
+    return type(get<k0>(t), get<k1>(t));
+  }
+};
+
+template <class Tuple, int k0, int k1, int k2>
+class TupleFields<Tuple, k0, k1, k2, -1, -1, -1, -1, -1, -1, -1> {
+ public:
+  typedef ::testing::tuple<GMOCK_FIELD_TYPE_(Tuple, k0),
+      GMOCK_FIELD_TYPE_(Tuple, k1), GMOCK_FIELD_TYPE_(Tuple, k2)> type;
+  static type GetSelectedFields(const Tuple& t) {
+    return type(get<k0>(t), get<k1>(t), get<k2>(t));
+  }
+};
+
+template <class Tuple, int k0, int k1, int k2, int k3>
+class TupleFields<Tuple, k0, k1, k2, k3, -1, -1, -1, -1, -1, -1> {
+ public:
+  typedef ::testing::tuple<GMOCK_FIELD_TYPE_(Tuple, k0),
+      GMOCK_FIELD_TYPE_(Tuple, k1), GMOCK_FIELD_TYPE_(Tuple, k2),
+      GMOCK_FIELD_TYPE_(Tuple, k3)> type;
+  static type GetSelectedFields(const Tuple& t) {
+    return type(get<k0>(t), get<k1>(t), get<k2>(t), get<k3>(t));
+  }
+};
+
+template <class Tuple, int k0, int k1, int k2, int k3, int k4>
+class TupleFields<Tuple, k0, k1, k2, k3, k4, -1, -1, -1, -1, -1> {
+ public:
+  typedef ::testing::tuple<GMOCK_FIELD_TYPE_(Tuple, k0),
+      GMOCK_FIELD_TYPE_(Tuple, k1), GMOCK_FIELD_TYPE_(Tuple, k2),
+      GMOCK_FIELD_TYPE_(Tuple, k3), GMOCK_FIELD_TYPE_(Tuple, k4)> type;
+  static type GetSelectedFields(const Tuple& t) {
+    return type(get<k0>(t), get<k1>(t), get<k2>(t), get<k3>(t), get<k4>(t));
+  }
+};
+
+template <class Tuple, int k0, int k1, int k2, int k3, int k4, int k5>
+class TupleFields<Tuple, k0, k1, k2, k3, k4, k5, -1, -1, -1, -1> {
+ public:
+  typedef ::testing::tuple<GMOCK_FIELD_TYPE_(Tuple, k0),
+      GMOCK_FIELD_TYPE_(Tuple, k1), GMOCK_FIELD_TYPE_(Tuple, k2),
+      GMOCK_FIELD_TYPE_(Tuple, k3), GMOCK_FIELD_TYPE_(Tuple, k4),
+      GMOCK_FIELD_TYPE_(Tuple, k5)> type;
+  static type GetSelectedFields(const Tuple& t) {
+    return type(get<k0>(t), get<k1>(t), get<k2>(t), get<k3>(t), get<k4>(t),
+        get<k5>(t));
+  }
+};
+
+template <class Tuple, int k0, int k1, int k2, int k3, int k4, int k5, int k6>
+class TupleFields<Tuple, k0, k1, k2, k3, k4, k5, k6, -1, -1, -1> {
+ public:
+  typedef ::testing::tuple<GMOCK_FIELD_TYPE_(Tuple, k0),
+      GMOCK_FIELD_TYPE_(Tuple, k1), GMOCK_FIELD_TYPE_(Tuple, k2),
+      GMOCK_FIELD_TYPE_(Tuple, k3), GMOCK_FIELD_TYPE_(Tuple, k4),
+      GMOCK_FIELD_TYPE_(Tuple, k5), GMOCK_FIELD_TYPE_(Tuple, k6)> type;
+  static type GetSelectedFields(const Tuple& t) {
+    return type(get<k0>(t), get<k1>(t), get<k2>(t), get<k3>(t), get<k4>(t),
+        get<k5>(t), get<k6>(t));
+  }
+};
+
+template <class Tuple, int k0, int k1, int k2, int k3, int k4, int k5, int k6,
+    int k7>
+class TupleFields<Tuple, k0, k1, k2, k3, k4, k5, k6, k7, -1, -1> {
+ public:
+  typedef ::testing::tuple<GMOCK_FIELD_TYPE_(Tuple, k0),
+      GMOCK_FIELD_TYPE_(Tuple, k1), GMOCK_FIELD_TYPE_(Tuple, k2),
+      GMOCK_FIELD_TYPE_(Tuple, k3), GMOCK_FIELD_TYPE_(Tuple, k4),
+      GMOCK_FIELD_TYPE_(Tuple, k5), GMOCK_FIELD_TYPE_(Tuple, k6),
+      GMOCK_FIELD_TYPE_(Tuple, k7)> type;
+  static type GetSelectedFields(const Tuple& t) {
+    return type(get<k0>(t), get<k1>(t), get<k2>(t), get<k3>(t), get<k4>(t),
+        get<k5>(t), get<k6>(t), get<k7>(t));
+  }
+};
+
+template <class Tuple, int k0, int k1, int k2, int k3, int k4, int k5, int k6,
+    int k7, int k8>
+class TupleFields<Tuple, k0, k1, k2, k3, k4, k5, k6, k7, k8, -1> {
+ public:
+  typedef ::testing::tuple<GMOCK_FIELD_TYPE_(Tuple, k0),
+      GMOCK_FIELD_TYPE_(Tuple, k1), GMOCK_FIELD_TYPE_(Tuple, k2),
+      GMOCK_FIELD_TYPE_(Tuple, k3), GMOCK_FIELD_TYPE_(Tuple, k4),
+      GMOCK_FIELD_TYPE_(Tuple, k5), GMOCK_FIELD_TYPE_(Tuple, k6),
+      GMOCK_FIELD_TYPE_(Tuple, k7), GMOCK_FIELD_TYPE_(Tuple, k8)> type;
+  static type GetSelectedFields(const Tuple& t) {
+    return type(get<k0>(t), get<k1>(t), get<k2>(t), get<k3>(t), get<k4>(t),
+        get<k5>(t), get<k6>(t), get<k7>(t), get<k8>(t));
+  }
+};
+
+#undef GMOCK_FIELD_TYPE_
+
+// Implements the Args() matcher.
+template <class ArgsTuple, int k0 = -1, int k1 = -1, int k2 = -1, int k3 = -1,
+    int k4 = -1, int k5 = -1, int k6 = -1, int k7 = -1, int k8 = -1,
+    int k9 = -1>
+class ArgsMatcherImpl : public MatcherInterface<ArgsTuple> {
+ public:
+  // ArgsTuple may have top-level const or reference modifiers.
+  typedef GTEST_REMOVE_REFERENCE_AND_CONST_(ArgsTuple) RawArgsTuple;
+  typedef typename internal::TupleFields<RawArgsTuple, k0, k1, k2, k3, k4, k5,
+      k6, k7, k8, k9>::type SelectedArgs;
+  typedef Matcher<const SelectedArgs&> MonomorphicInnerMatcher;
+
+  template <typename InnerMatcher>
+  explicit ArgsMatcherImpl(const InnerMatcher& inner_matcher)
+      : inner_matcher_(SafeMatcherCast<const SelectedArgs&>(inner_matcher)) {}
+
+  virtual bool MatchAndExplain(ArgsTuple args,
+                               MatchResultListener* listener) const {
+    const SelectedArgs& selected_args = GetSelectedArgs(args);
+    if (!listener->IsInterested())
+      return inner_matcher_.Matches(selected_args);
+
+    PrintIndices(listener->stream());
+    *listener << "are " << PrintToString(selected_args);
+
+    StringMatchResultListener inner_listener;
+    const bool match = inner_matcher_.MatchAndExplain(selected_args,
+                                                      &inner_listener);
+    PrintIfNotEmpty(inner_listener.str(), listener->stream());
+    return match;
+  }
+
+  virtual void DescribeTo(::std::ostream* os) const {
+    *os << "are a tuple ";
+    PrintIndices(os);
+    inner_matcher_.DescribeTo(os);
+  }
+
+  virtual void DescribeNegationTo(::std::ostream* os) const {
+    *os << "are a tuple ";
+    PrintIndices(os);
+    inner_matcher_.DescribeNegationTo(os);
+  }
+
+ private:
+  static SelectedArgs GetSelectedArgs(ArgsTuple args) {
+    return TupleFields<RawArgsTuple, k0, k1, k2, k3, k4, k5, k6, k7, k8,
+        k9>::GetSelectedFields(args);
+  }
+
+  // Prints the indices of the selected fields.
+  static void PrintIndices(::std::ostream* os) {
+    *os << "whose fields (";
+    const int indices[10] = { k0, k1, k2, k3, k4, k5, k6, k7, k8, k9 };
+    for (int i = 0; i < 10; i++) {
+      if (indices[i] < 0)
+        break;
+
+      if (i >= 1)
+        *os << ", ";
+
+      *os << "#" << indices[i];
+    }
+    *os << ") ";
+  }
+
+  const MonomorphicInnerMatcher inner_matcher_;
+
+  GTEST_DISALLOW_ASSIGN_(ArgsMatcherImpl);
+};
+
+template <class InnerMatcher, int k0 = -1, int k1 = -1, int k2 = -1,
+    int k3 = -1, int k4 = -1, int k5 = -1, int k6 = -1, int k7 = -1,
+    int k8 = -1, int k9 = -1>
+class ArgsMatcher {
+ public:
+  explicit ArgsMatcher(const InnerMatcher& inner_matcher)
+      : inner_matcher_(inner_matcher) {}
+
+  template <typename ArgsTuple>
+  operator Matcher<ArgsTuple>() const {
+    return MakeMatcher(new ArgsMatcherImpl<ArgsTuple, k0, k1, k2, k3, k4, k5,
+        k6, k7, k8, k9>(inner_matcher_));
+  }
+
+ private:
+  const InnerMatcher inner_matcher_;
+
+  GTEST_DISALLOW_ASSIGN_(ArgsMatcher);
+};
+
+// A set of metafunctions for computing the result type of AllOf.
+// AllOf(m1, ..., mN) returns
+// AllOfResultN<decltype(m1), ..., decltype(mN)>::type.
+
+// Although AllOf isn't defined for one argument, AllOfResult1 is defined
+// to simplify the implementation.
+template <typename M1>
+struct AllOfResult1 {
+  typedef M1 type;
+};
+
+template <typename M1, typename M2>
+struct AllOfResult2 {
+  typedef BothOfMatcher<
+      typename AllOfResult1<M1>::type,
+      typename AllOfResult1<M2>::type
+  > type;
+};
+
+template <typename M1, typename M2, typename M3>
+struct AllOfResult3 {
+  typedef BothOfMatcher<
+      typename AllOfResult1<M1>::type,
+      typename AllOfResult2<M2, M3>::type
+  > type;
+};
+
+template <typename M1, typename M2, typename M3, typename M4>
+struct AllOfResult4 {
+  typedef BothOfMatcher<
+      typename AllOfResult2<M1, M2>::type,
+      typename AllOfResult2<M3, M4>::type
+  > type;
+};
+
+template <typename M1, typename M2, typename M3, typename M4, typename M5>
+struct AllOfResult5 {
+  typedef BothOfMatcher<
+      typename AllOfResult2<M1, M2>::type,
+      typename AllOfResult3<M3, M4, M5>::type
+  > type;
+};
+
+template <typename M1, typename M2, typename M3, typename M4, typename M5,
+    typename M6>
+struct AllOfResult6 {
+  typedef BothOfMatcher<
+      typename AllOfResult3<M1, M2, M3>::type,
+      typename AllOfResult3<M4, M5, M6>::type
+  > type;
+};
+
+template <typename M1, typename M2, typename M3, typename M4, typename M5,
+    typename M6, typename M7>
+struct AllOfResult7 {
+  typedef BothOfMatcher<
+      typename AllOfResult3<M1, M2, M3>::type,
+      typename AllOfResult4<M4, M5, M6, M7>::type
+  > type;
+};
+
+template <typename M1, typename M2, typename M3, typename M4, typename M5,
+    typename M6, typename M7, typename M8>
+struct AllOfResult8 {
+  typedef BothOfMatcher<
+      typename AllOfResult4<M1, M2, M3, M4>::type,
+      typename AllOfResult4<M5, M6, M7, M8>::type
+  > type;
+};
+
+template <typename M1, typename M2, typename M3, typename M4, typename M5,
+    typename M6, typename M7, typename M8, typename M9>
+struct AllOfResult9 {
+  typedef BothOfMatcher<
+      typename AllOfResult4<M1, M2, M3, M4>::type,
+      typename AllOfResult5<M5, M6, M7, M8, M9>::type
+  > type;
+};
+
+template <typename M1, typename M2, typename M3, typename M4, typename M5,
+    typename M6, typename M7, typename M8, typename M9, typename M10>
+struct AllOfResult10 {
+  typedef BothOfMatcher<
+      typename AllOfResult5<M1, M2, M3, M4, M5>::type,
+      typename AllOfResult5<M6, M7, M8, M9, M10>::type
+  > type;
+};
+
+// A set of metafunctions for computing the result type of AnyOf.
+// AnyOf(m1, ..., mN) returns
+// AnyOfResultN<decltype(m1), ..., decltype(mN)>::type.
+
+// Although AnyOf isn't defined for one argument, AnyOfResult1 is defined
+// to simplify the implementation.
+template <typename M1>
+struct AnyOfResult1 {
+  typedef M1 type;
+};
+
+template <typename M1, typename M2>
+struct AnyOfResult2 {
+  typedef EitherOfMatcher<
+      typename AnyOfResult1<M1>::type,
+      typename AnyOfResult1<M2>::type
+  > type;
+};
+
+template <typename M1, typename M2, typename M3>
+struct AnyOfResult3 {
+  typedef EitherOfMatcher<
+      typename AnyOfResult1<M1>::type,
+      typename AnyOfResult2<M2, M3>::type
+  > type;
+};
+
+template <typename M1, typename M2, typename M3, typename M4>
+struct AnyOfResult4 {
+  typedef EitherOfMatcher<
+      typename AnyOfResult2<M1, M2>::type,
+      typename AnyOfResult2<M3, M4>::type
+  > type;
+};
+
+template <typename M1, typename M2, typename M3, typename M4, typename M5>
+struct AnyOfResult5 {
+  typedef EitherOfMatcher<
+      typename AnyOfResult2<M1, M2>::type,
+      typename AnyOfResult3<M3, M4, M5>::type
+  > type;
+};
+
+template <typename M1, typename M2, typename M3, typename M4, typename M5,
+    typename M6>
+struct AnyOfResult6 {
+  typedef EitherOfMatcher<
+      typename AnyOfResult3<M1, M2, M3>::type,
+      typename AnyOfResult3<M4, M5, M6>::type
+  > type;
+};
+
+template <typename M1, typename M2, typename M3, typename M4, typename M5,
+    typename M6, typename M7>
+struct AnyOfResult7 {
+  typedef EitherOfMatcher<
+      typename AnyOfResult3<M1, M2, M3>::type,
+      typename AnyOfResult4<M4, M5, M6, M7>::type
+  > type;
+};
+
+template <typename M1, typename M2, typename M3, typename M4, typename M5,
+    typename M6, typename M7, typename M8>
+struct AnyOfResult8 {
+  typedef EitherOfMatcher<
+      typename AnyOfResult4<M1, M2, M3, M4>::type,
+      typename AnyOfResult4<M5, M6, M7, M8>::type
+  > type;
+};
+
+template <typename M1, typename M2, typename M3, typename M4, typename M5,
+    typename M6, typename M7, typename M8, typename M9>
+struct AnyOfResult9 {
+  typedef EitherOfMatcher<
+      typename AnyOfResult4<M1, M2, M3, M4>::type,
+      typename AnyOfResult5<M5, M6, M7, M8, M9>::type
+  > type;
+};
+
+template <typename M1, typename M2, typename M3, typename M4, typename M5,
+    typename M6, typename M7, typename M8, typename M9, typename M10>
+struct AnyOfResult10 {
+  typedef EitherOfMatcher<
+      typename AnyOfResult5<M1, M2, M3, M4, M5>::type,
+      typename AnyOfResult5<M6, M7, M8, M9, M10>::type
+  > type;
+};
+
+}  // namespace internal
+
+// Args<N1, N2, ..., Nk>(a_matcher) matches a tuple if the selected
+// fields of it match a_matcher.  C++ doesn't support default
+// arguments for function templates, so we have to overload it.
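+//
+// For example (an illustrative sketch; mock_foo and its 3-argument
+// Blah() method are hypothetical):
+//
+//   // Expects arguments #0 and #2 of Blah(), taken as a 2-tuple,
+//   // to satisfy Lt(), i.e. argument #0 < argument #2.
+//   EXPECT_CALL(mock_foo, Blah(_, _, _)).With(Args<0, 2>(Lt()));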
+template <typename InnerMatcher>
+inline internal::ArgsMatcher<InnerMatcher>
+Args(const InnerMatcher& matcher) {
+  return internal::ArgsMatcher<InnerMatcher>(matcher);
+}
+
+template <int k1, typename InnerMatcher>
+inline internal::ArgsMatcher<InnerMatcher, k1>
+Args(const InnerMatcher& matcher) {
+  return internal::ArgsMatcher<InnerMatcher, k1>(matcher);
+}
+
+template <int k1, int k2, typename InnerMatcher>
+inline internal::ArgsMatcher<InnerMatcher, k1, k2>
+Args(const InnerMatcher& matcher) {
+  return internal::ArgsMatcher<InnerMatcher, k1, k2>(matcher);
+}
+
+template <int k1, int k2, int k3, typename InnerMatcher>
+inline internal::ArgsMatcher<InnerMatcher, k1, k2, k3>
+Args(const InnerMatcher& matcher) {
+  return internal::ArgsMatcher<InnerMatcher, k1, k2, k3>(matcher);
+}
+
+template <int k1, int k2, int k3, int k4, typename InnerMatcher>
+inline internal::ArgsMatcher<InnerMatcher, k1, k2, k3, k4>
+Args(const InnerMatcher& matcher) {
+  return internal::ArgsMatcher<InnerMatcher, k1, k2, k3, k4>(matcher);
+}
+
+template <int k1, int k2, int k3, int k4, int k5, typename InnerMatcher>
+inline internal::ArgsMatcher<InnerMatcher, k1, k2, k3, k4, k5>
+Args(const InnerMatcher& matcher) {
+  return internal::ArgsMatcher<InnerMatcher, k1, k2, k3, k4, k5>(matcher);
+}
+
+template <int k1, int k2, int k3, int k4, int k5, int k6, typename InnerMatcher>
+inline internal::ArgsMatcher<InnerMatcher, k1, k2, k3, k4, k5, k6>
+Args(const InnerMatcher& matcher) {
+  return internal::ArgsMatcher<InnerMatcher, k1, k2, k3, k4, k5, k6>(matcher);
+}
+
+template <int k1, int k2, int k3, int k4, int k5, int k6, int k7,
+    typename InnerMatcher>
+inline internal::ArgsMatcher<InnerMatcher, k1, k2, k3, k4, k5, k6, k7>
+Args(const InnerMatcher& matcher) {
+  return internal::ArgsMatcher<InnerMatcher, k1, k2, k3, k4, k5, k6,
+      k7>(matcher);
+}
+
+template <int k1, int k2, int k3, int k4, int k5, int k6, int k7, int k8,
+    typename InnerMatcher>
+inline internal::ArgsMatcher<InnerMatcher, k1, k2, k3, k4, k5, k6, k7, k8>
+Args(const InnerMatcher& matcher) {
+  return internal::ArgsMatcher<InnerMatcher, k1, k2, k3, k4, k5, k6, k7,
+      k8>(matcher);
+}
+
+template <int k1, int k2, int k3, int k4, int k5, int k6, int k7, int k8,
+    int k9, typename InnerMatcher>
+inline internal::ArgsMatcher<InnerMatcher, k1, k2, k3, k4, k5, k6, k7, k8, k9>
+Args(const InnerMatcher& matcher) {
+  return internal::ArgsMatcher<InnerMatcher, k1, k2, k3, k4, k5, k6, k7, k8,
+      k9>(matcher);
+}
+
+template <int k1, int k2, int k3, int k4, int k5, int k6, int k7, int k8,
+    int k9, int k10, typename InnerMatcher>
+inline internal::ArgsMatcher<InnerMatcher, k1, k2, k3, k4, k5, k6, k7, k8, k9,
+    k10>
+Args(const InnerMatcher& matcher) {
+  return internal::ArgsMatcher<InnerMatcher, k1, k2, k3, k4, k5, k6, k7, k8,
+      k9, k10>(matcher);
+}
+
+// ElementsAre(e_1, e_2, ... e_n) matches an STL-style container with
+// n elements, where the i-th element in the container must
+// match the i-th argument in the list.  Each argument of
+// ElementsAre() can be either a value or a matcher.  We support up to
+// 10 arguments.
+//
+// The use of DecayArray in the implementation allows ElementsAre()
+// to accept string literals, whose type is const char[N], but we
+// want to treat them as const char*.
+//
+// NOTE: Since ElementsAre() cares about the order of the elements, it
+// must not be used with containers whose elements' order is
+// undefined (e.g. hash_map).
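+//
+// For example (illustrative; v is assumed to be a std::vector<int>):
+//
+//   // v must have exactly 4 elements: 1, then something greater than 0,
+//   // then anything, then 5.
+//   EXPECT_THAT(v, ElementsAre(1, Gt(0), _, 5));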
+
+inline internal::ElementsAreMatcher<
+    ::testing::tuple<> >
+ElementsAre() {
+  typedef ::testing::tuple<> Args;
+  return internal::ElementsAreMatcher<Args>(Args());
+}
+
+template <typename T1>
+inline internal::ElementsAreMatcher<
+    ::testing::tuple<
+        typename internal::DecayArray<T1>::type> >
+ElementsAre(const T1& e1) {
+  typedef ::testing::tuple<
+      typename internal::DecayArray<T1>::type> Args;
+  return internal::ElementsAreMatcher<Args>(Args(e1));
+}
+
+template <typename T1, typename T2>
+inline internal::ElementsAreMatcher<
+    ::testing::tuple<
+        typename internal::DecayArray<T1>::type,
+        typename internal::DecayArray<T2>::type> >
+ElementsAre(const T1& e1, const T2& e2) {
+  typedef ::testing::tuple<
+      typename internal::DecayArray<T1>::type,
+      typename internal::DecayArray<T2>::type> Args;
+  return internal::ElementsAreMatcher<Args>(Args(e1, e2));
+}
+
+template <typename T1, typename T2, typename T3>
+inline internal::ElementsAreMatcher<
+    ::testing::tuple<
+        typename internal::DecayArray<T1>::type,
+        typename internal::DecayArray<T2>::type,
+        typename internal::DecayArray<T3>::type> >
+ElementsAre(const T1& e1, const T2& e2, const T3& e3) {
+  typedef ::testing::tuple<
+      typename internal::DecayArray<T1>::type,
+      typename internal::DecayArray<T2>::type,
+      typename internal::DecayArray<T3>::type> Args;
+  return internal::ElementsAreMatcher<Args>(Args(e1, e2, e3));
+}
+
+template <typename T1, typename T2, typename T3, typename T4>
+inline internal::ElementsAreMatcher<
+    ::testing::tuple<
+        typename internal::DecayArray<T1>::type,
+        typename internal::DecayArray<T2>::type,
+        typename internal::DecayArray<T3>::type,
+        typename internal::DecayArray<T4>::type> >
+ElementsAre(const T1& e1, const T2& e2, const T3& e3, const T4& e4) {
+  typedef ::testing::tuple<
+      typename internal::DecayArray<T1>::type,
+      typename internal::DecayArray<T2>::type,
+      typename internal::DecayArray<T3>::type,
+      typename internal::DecayArray<T4>::type> Args;
+  return internal::ElementsAreMatcher<Args>(Args(e1, e2, e3, e4));
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+inline internal::ElementsAreMatcher<
+    ::testing::tuple<
+        typename internal::DecayArray<T1>::type,
+        typename internal::DecayArray<T2>::type,
+        typename internal::DecayArray<T3>::type,
+        typename internal::DecayArray<T4>::type,
+        typename internal::DecayArray<T5>::type> >
+ElementsAre(const T1& e1, const T2& e2, const T3& e3, const T4& e4,
+    const T5& e5) {
+  typedef ::testing::tuple<
+      typename internal::DecayArray<T1>::type,
+      typename internal::DecayArray<T2>::type,
+      typename internal::DecayArray<T3>::type,
+      typename internal::DecayArray<T4>::type,
+      typename internal::DecayArray<T5>::type> Args;
+  return internal::ElementsAreMatcher<Args>(Args(e1, e2, e3, e4, e5));
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6>
+inline internal::ElementsAreMatcher<
+    ::testing::tuple<
+        typename internal::DecayArray<T1>::type,
+        typename internal::DecayArray<T2>::type,
+        typename internal::DecayArray<T3>::type,
+        typename internal::DecayArray<T4>::type,
+        typename internal::DecayArray<T5>::type,
+        typename internal::DecayArray<T6>::type> >
+ElementsAre(const T1& e1, const T2& e2, const T3& e3, const T4& e4,
+    const T5& e5, const T6& e6) {
+  typedef ::testing::tuple<
+      typename internal::DecayArray<T1>::type,
+      typename internal::DecayArray<T2>::type,
+      typename internal::DecayArray<T3>::type,
+      typename internal::DecayArray<T4>::type,
+      typename internal::DecayArray<T5>::type,
+      typename internal::DecayArray<T6>::type> Args;
+  return internal::ElementsAreMatcher<Args>(Args(e1, e2, e3, e4, e5, e6));
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7>
+inline internal::ElementsAreMatcher<
+    ::testing::tuple<
+        typename internal::DecayArray<T1>::type,
+        typename internal::DecayArray<T2>::type,
+        typename internal::DecayArray<T3>::type,
+        typename internal::DecayArray<T4>::type,
+        typename internal::DecayArray<T5>::type,
+        typename internal::DecayArray<T6>::type,
+        typename internal::DecayArray<T7>::type> >
+ElementsAre(const T1& e1, const T2& e2, const T3& e3, const T4& e4,
+    const T5& e5, const T6& e6, const T7& e7) {
+  typedef ::testing::tuple<
+      typename internal::DecayArray<T1>::type,
+      typename internal::DecayArray<T2>::type,
+      typename internal::DecayArray<T3>::type,
+      typename internal::DecayArray<T4>::type,
+      typename internal::DecayArray<T5>::type,
+      typename internal::DecayArray<T6>::type,
+      typename internal::DecayArray<T7>::type> Args;
+  return internal::ElementsAreMatcher<Args>(Args(e1, e2, e3, e4, e5, e6, e7));
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8>
+inline internal::ElementsAreMatcher<
+    ::testing::tuple<
+        typename internal::DecayArray<T1>::type,
+        typename internal::DecayArray<T2>::type,
+        typename internal::DecayArray<T3>::type,
+        typename internal::DecayArray<T4>::type,
+        typename internal::DecayArray<T5>::type,
+        typename internal::DecayArray<T6>::type,
+        typename internal::DecayArray<T7>::type,
+        typename internal::DecayArray<T8>::type> >
+ElementsAre(const T1& e1, const T2& e2, const T3& e3, const T4& e4,
+    const T5& e5, const T6& e6, const T7& e7, const T8& e8) {
+  typedef ::testing::tuple<
+      typename internal::DecayArray<T1>::type,
+      typename internal::DecayArray<T2>::type,
+      typename internal::DecayArray<T3>::type,
+      typename internal::DecayArray<T4>::type,
+      typename internal::DecayArray<T5>::type,
+      typename internal::DecayArray<T6>::type,
+      typename internal::DecayArray<T7>::type,
+      typename internal::DecayArray<T8>::type> Args;
+  return internal::ElementsAreMatcher<Args>(Args(e1, e2, e3, e4, e5, e6, e7,
+      e8));
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9>
+inline internal::ElementsAreMatcher<
+    ::testing::tuple<
+        typename internal::DecayArray<T1>::type,
+        typename internal::DecayArray<T2>::type,
+        typename internal::DecayArray<T3>::type,
+        typename internal::DecayArray<T4>::type,
+        typename internal::DecayArray<T5>::type,
+        typename internal::DecayArray<T6>::type,
+        typename internal::DecayArray<T7>::type,
+        typename internal::DecayArray<T8>::type,
+        typename internal::DecayArray<T9>::type> >
+ElementsAre(const T1& e1, const T2& e2, const T3& e3, const T4& e4,
+    const T5& e5, const T6& e6, const T7& e7, const T8& e8, const T9& e9) {
+  typedef ::testing::tuple<
+      typename internal::DecayArray<T1>::type,
+      typename internal::DecayArray<T2>::type,
+      typename internal::DecayArray<T3>::type,
+      typename internal::DecayArray<T4>::type,
+      typename internal::DecayArray<T5>::type,
+      typename internal::DecayArray<T6>::type,
+      typename internal::DecayArray<T7>::type,
+      typename internal::DecayArray<T8>::type,
+      typename internal::DecayArray<T9>::type> Args;
+  return internal::ElementsAreMatcher<Args>(Args(e1, e2, e3, e4, e5, e6, e7,
+      e8, e9));
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10>
+inline internal::ElementsAreMatcher<
+    ::testing::tuple<
+        typename internal::DecayArray<T1>::type,
+        typename internal::DecayArray<T2>::type,
+        typename internal::DecayArray<T3>::type,
+        typename internal::DecayArray<T4>::type,
+        typename internal::DecayArray<T5>::type,
+        typename internal::DecayArray<T6>::type,
+        typename internal::DecayArray<T7>::type,
+        typename internal::DecayArray<T8>::type,
+        typename internal::DecayArray<T9>::type,
+        typename internal::DecayArray<T10>::type> >
+ElementsAre(const T1& e1, const T2& e2, const T3& e3, const T4& e4,
+    const T5& e5, const T6& e6, const T7& e7, const T8& e8, const T9& e9,
+    const T10& e10) {
+  typedef ::testing::tuple<
+      typename internal::DecayArray<T1>::type,
+      typename internal::DecayArray<T2>::type,
+      typename internal::DecayArray<T3>::type,
+      typename internal::DecayArray<T4>::type,
+      typename internal::DecayArray<T5>::type,
+      typename internal::DecayArray<T6>::type,
+      typename internal::DecayArray<T7>::type,
+      typename internal::DecayArray<T8>::type,
+      typename internal::DecayArray<T9>::type,
+      typename internal::DecayArray<T10>::type> Args;
+  return internal::ElementsAreMatcher<Args>(Args(e1, e2, e3, e4, e5, e6, e7,
+      e8, e9, e10));
+}
+
+// UnorderedElementsAre(e_1, e_2, ..., e_n) is an ElementsAre extension
+// that matches n elements in any order.  We support up to n=10 arguments.
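+//
+// For example (illustrative; words is assumed to be a container of strings):
+//
+//   // words must contain exactly these three elements, in any order.
+//   EXPECT_THAT(words, UnorderedElementsAre("hip", "hip", "hooray"));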
+
+inline internal::UnorderedElementsAreMatcher<
+    ::testing::tuple<> >
+UnorderedElementsAre() {
+  typedef ::testing::tuple<> Args;
+  return internal::UnorderedElementsAreMatcher<Args>(Args());
+}
+
+template <typename T1>
+inline internal::UnorderedElementsAreMatcher<
+    ::testing::tuple<
+        typename internal::DecayArray<T1>::type> >
+UnorderedElementsAre(const T1& e1) {
+  typedef ::testing::tuple<
+      typename internal::DecayArray<T1>::type> Args;
+  return internal::UnorderedElementsAreMatcher<Args>(Args(e1));
+}
+
+template <typename T1, typename T2>
+inline internal::UnorderedElementsAreMatcher<
+    ::testing::tuple<
+        typename internal::DecayArray<T1>::type,
+        typename internal::DecayArray<T2>::type> >
+UnorderedElementsAre(const T1& e1, const T2& e2) {
+  typedef ::testing::tuple<
+      typename internal::DecayArray<T1>::type,
+      typename internal::DecayArray<T2>::type> Args;
+  return internal::UnorderedElementsAreMatcher<Args>(Args(e1, e2));
+}
+
+template <typename T1, typename T2, typename T3>
+inline internal::UnorderedElementsAreMatcher<
+    ::testing::tuple<
+        typename internal::DecayArray<T1>::type,
+        typename internal::DecayArray<T2>::type,
+        typename internal::DecayArray<T3>::type> >
+UnorderedElementsAre(const T1& e1, const T2& e2, const T3& e3) {
+  typedef ::testing::tuple<
+      typename internal::DecayArray<T1>::type,
+      typename internal::DecayArray<T2>::type,
+      typename internal::DecayArray<T3>::type> Args;
+  return internal::UnorderedElementsAreMatcher<Args>(Args(e1, e2, e3));
+}
+
+template <typename T1, typename T2, typename T3, typename T4>
+inline internal::UnorderedElementsAreMatcher<
+    ::testing::tuple<
+        typename internal::DecayArray<T1>::type,
+        typename internal::DecayArray<T2>::type,
+        typename internal::DecayArray<T3>::type,
+        typename internal::DecayArray<T4>::type> >
+UnorderedElementsAre(const T1& e1, const T2& e2, const T3& e3, const T4& e4) {
+  typedef ::testing::tuple<
+      typename internal::DecayArray<T1>::type,
+      typename internal::DecayArray<T2>::type,
+      typename internal::DecayArray<T3>::type,
+      typename internal::DecayArray<T4>::type> Args;
+  return internal::UnorderedElementsAreMatcher<Args>(Args(e1, e2, e3, e4));
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+inline internal::UnorderedElementsAreMatcher<
+    ::testing::tuple<
+        typename internal::DecayArray<T1>::type,
+        typename internal::DecayArray<T2>::type,
+        typename internal::DecayArray<T3>::type,
+        typename internal::DecayArray<T4>::type,
+        typename internal::DecayArray<T5>::type> >
+UnorderedElementsAre(const T1& e1, const T2& e2, const T3& e3, const T4& e4,
+    const T5& e5) {
+  typedef ::testing::tuple<
+      typename internal::DecayArray<T1>::type,
+      typename internal::DecayArray<T2>::type,
+      typename internal::DecayArray<T3>::type,
+      typename internal::DecayArray<T4>::type,
+      typename internal::DecayArray<T5>::type> Args;
+  return internal::UnorderedElementsAreMatcher<Args>(Args(e1, e2, e3, e4, e5));
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6>
+inline internal::UnorderedElementsAreMatcher<
+    ::testing::tuple<
+        typename internal::DecayArray<T1>::type,
+        typename internal::DecayArray<T2>::type,
+        typename internal::DecayArray<T3>::type,
+        typename internal::DecayArray<T4>::type,
+        typename internal::DecayArray<T5>::type,
+        typename internal::DecayArray<T6>::type> >
+UnorderedElementsAre(const T1& e1, const T2& e2, const T3& e3, const T4& e4,
+    const T5& e5, const T6& e6) {
+  typedef ::testing::tuple<
+      typename internal::DecayArray<T1>::type,
+      typename internal::DecayArray<T2>::type,
+      typename internal::DecayArray<T3>::type,
+      typename internal::DecayArray<T4>::type,
+      typename internal::DecayArray<T5>::type,
+      typename internal::DecayArray<T6>::type> Args;
+  return internal::UnorderedElementsAreMatcher<Args>(Args(e1, e2, e3, e4, e5,
+      e6));
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7>
+inline internal::UnorderedElementsAreMatcher<
+    ::testing::tuple<
+        typename internal::DecayArray<T1>::type,
+        typename internal::DecayArray<T2>::type,
+        typename internal::DecayArray<T3>::type,
+        typename internal::DecayArray<T4>::type,
+        typename internal::DecayArray<T5>::type,
+        typename internal::DecayArray<T6>::type,
+        typename internal::DecayArray<T7>::type> >
+UnorderedElementsAre(const T1& e1, const T2& e2, const T3& e3, const T4& e4,
+    const T5& e5, const T6& e6, const T7& e7) {
+  typedef ::testing::tuple<
+      typename internal::DecayArray<T1>::type,
+      typename internal::DecayArray<T2>::type,
+      typename internal::DecayArray<T3>::type,
+      typename internal::DecayArray<T4>::type,
+      typename internal::DecayArray<T5>::type,
+      typename internal::DecayArray<T6>::type,
+      typename internal::DecayArray<T7>::type> Args;
+  return internal::UnorderedElementsAreMatcher<Args>(Args(e1, e2, e3, e4, e5,
+      e6, e7));
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8>
+inline internal::UnorderedElementsAreMatcher<
+    ::testing::tuple<
+        typename internal::DecayArray<T1>::type,
+        typename internal::DecayArray<T2>::type,
+        typename internal::DecayArray<T3>::type,
+        typename internal::DecayArray<T4>::type,
+        typename internal::DecayArray<T5>::type,
+        typename internal::DecayArray<T6>::type,
+        typename internal::DecayArray<T7>::type,
+        typename internal::DecayArray<T8>::type> >
+UnorderedElementsAre(const T1& e1, const T2& e2, const T3& e3, const T4& e4,
+    const T5& e5, const T6& e6, const T7& e7, const T8& e8) {
+  typedef ::testing::tuple<
+      typename internal::DecayArray<T1>::type,
+      typename internal::DecayArray<T2>::type,
+      typename internal::DecayArray<T3>::type,
+      typename internal::DecayArray<T4>::type,
+      typename internal::DecayArray<T5>::type,
+      typename internal::DecayArray<T6>::type,
+      typename internal::DecayArray<T7>::type,
+      typename internal::DecayArray<T8>::type> Args;
+  return internal::UnorderedElementsAreMatcher<Args>(Args(e1, e2, e3, e4, e5,
+      e6, e7, e8));
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9>
+inline internal::UnorderedElementsAreMatcher<
+    ::testing::tuple<
+        typename internal::DecayArray<T1>::type,
+        typename internal::DecayArray<T2>::type,
+        typename internal::DecayArray<T3>::type,
+        typename internal::DecayArray<T4>::type,
+        typename internal::DecayArray<T5>::type,
+        typename internal::DecayArray<T6>::type,
+        typename internal::DecayArray<T7>::type,
+        typename internal::DecayArray<T8>::type,
+        typename internal::DecayArray<T9>::type> >
+UnorderedElementsAre(const T1& e1, const T2& e2, const T3& e3, const T4& e4,
+    const T5& e5, const T6& e6, const T7& e7, const T8& e8, const T9& e9) {
+  typedef ::testing::tuple<
+      typename internal::DecayArray<T1>::type,
+      typename internal::DecayArray<T2>::type,
+      typename internal::DecayArray<T3>::type,
+      typename internal::DecayArray<T4>::type,
+      typename internal::DecayArray<T5>::type,
+      typename internal::DecayArray<T6>::type,
+      typename internal::DecayArray<T7>::type,
+      typename internal::DecayArray<T8>::type,
+      typename internal::DecayArray<T9>::type> Args;
+  return internal::UnorderedElementsAreMatcher<Args>(Args(e1, e2, e3, e4, e5,
+      e6, e7, e8, e9));
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10>
+inline internal::UnorderedElementsAreMatcher<
+    ::testing::tuple<
+        typename internal::DecayArray<T1>::type,
+        typename internal::DecayArray<T2>::type,
+        typename internal::DecayArray<T3>::type,
+        typename internal::DecayArray<T4>::type,
+        typename internal::DecayArray<T5>::type,
+        typename internal::DecayArray<T6>::type,
+        typename internal::DecayArray<T7>::type,
+        typename internal::DecayArray<T8>::type,
+        typename internal::DecayArray<T9>::type,
+        typename internal::DecayArray<T10>::type> >
+UnorderedElementsAre(const T1& e1, const T2& e2, const T3& e3, const T4& e4,
+    const T5& e5, const T6& e6, const T7& e7, const T8& e8, const T9& e9,
+    const T10& e10) {
+  typedef ::testing::tuple<
+      typename internal::DecayArray<T1>::type,
+      typename internal::DecayArray<T2>::type,
+      typename internal::DecayArray<T3>::type,
+      typename internal::DecayArray<T4>::type,
+      typename internal::DecayArray<T5>::type,
+      typename internal::DecayArray<T6>::type,
+      typename internal::DecayArray<T7>::type,
+      typename internal::DecayArray<T8>::type,
+      typename internal::DecayArray<T9>::type,
+      typename internal::DecayArray<T10>::type> Args;
+  return internal::UnorderedElementsAreMatcher<Args>(Args(e1, e2, e3, e4, e5,
+      e6, e7, e8, e9, e10));
+}
+
+// AllOf(m1, m2, ..., mk) matches any value that matches all of the given
+// sub-matchers.  AllOf is called fully qualified to prevent ADL from firing.
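+//
+// For example (illustrative; n is assumed to be an int):
+//
+//   // n must be greater than 5 and must not be 10.
+//   EXPECT_THAT(n, ::testing::AllOf(Gt(5), Ne(10)));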
+
+template <typename M1, typename M2>
+inline typename internal::AllOfResult2<M1, M2>::type
+AllOf(M1 m1, M2 m2) {
+  return typename internal::AllOfResult2<M1, M2>::type(
+      m1,
+      m2);
+}
+
+template <typename M1, typename M2, typename M3>
+inline typename internal::AllOfResult3<M1, M2, M3>::type
+AllOf(M1 m1, M2 m2, M3 m3) {
+  return typename internal::AllOfResult3<M1, M2, M3>::type(
+      m1,
+      ::testing::AllOf(m2, m3));
+}
+
+template <typename M1, typename M2, typename M3, typename M4>
+inline typename internal::AllOfResult4<M1, M2, M3, M4>::type
+AllOf(M1 m1, M2 m2, M3 m3, M4 m4) {
+  return typename internal::AllOfResult4<M1, M2, M3, M4>::type(
+      ::testing::AllOf(m1, m2),
+      ::testing::AllOf(m3, m4));
+}
+
+template <typename M1, typename M2, typename M3, typename M4, typename M5>
+inline typename internal::AllOfResult5<M1, M2, M3, M4, M5>::type
+AllOf(M1 m1, M2 m2, M3 m3, M4 m4, M5 m5) {
+  return typename internal::AllOfResult5<M1, M2, M3, M4, M5>::type(
+      ::testing::AllOf(m1, m2),
+      ::testing::AllOf(m3, m4, m5));
+}
+
+template <typename M1, typename M2, typename M3, typename M4, typename M5,
+    typename M6>
+inline typename internal::AllOfResult6<M1, M2, M3, M4, M5, M6>::type
+AllOf(M1 m1, M2 m2, M3 m3, M4 m4, M5 m5, M6 m6) {
+  return typename internal::AllOfResult6<M1, M2, M3, M4, M5, M6>::type(
+      ::testing::AllOf(m1, m2, m3),
+      ::testing::AllOf(m4, m5, m6));
+}
+
+template <typename M1, typename M2, typename M3, typename M4, typename M5,
+    typename M6, typename M7>
+inline typename internal::AllOfResult7<M1, M2, M3, M4, M5, M6, M7>::type
+AllOf(M1 m1, M2 m2, M3 m3, M4 m4, M5 m5, M6 m6, M7 m7) {
+  return typename internal::AllOfResult7<M1, M2, M3, M4, M5, M6, M7>::type(
+      ::testing::AllOf(m1, m2, m3),
+      ::testing::AllOf(m4, m5, m6, m7));
+}
+
+template <typename M1, typename M2, typename M3, typename M4, typename M5,
+    typename M6, typename M7, typename M8>
+inline typename internal::AllOfResult8<M1, M2, M3, M4, M5, M6, M7, M8>::type
+AllOf(M1 m1, M2 m2, M3 m3, M4 m4, M5 m5, M6 m6, M7 m7, M8 m8) {
+  return typename internal::AllOfResult8<M1, M2, M3, M4, M5, M6, M7, M8>::type(
+      ::testing::AllOf(m1, m2, m3, m4),
+      ::testing::AllOf(m5, m6, m7, m8));
+}
+
+template <typename M1, typename M2, typename M3, typename M4, typename M5,
+    typename M6, typename M7, typename M8, typename M9>
+inline typename internal::AllOfResult9<M1, M2, M3, M4, M5, M6, M7, M8, M9>::type
+AllOf(M1 m1, M2 m2, M3 m3, M4 m4, M5 m5, M6 m6, M7 m7, M8 m8, M9 m9) {
+  return typename internal::AllOfResult9<M1, M2, M3, M4, M5, M6, M7, M8,
+      M9>::type(
+      ::testing::AllOf(m1, m2, m3, m4),
+      ::testing::AllOf(m5, m6, m7, m8, m9));
+}
+
+template <typename M1, typename M2, typename M3, typename M4, typename M5,
+    typename M6, typename M7, typename M8, typename M9, typename M10>
+inline typename internal::AllOfResult10<M1, M2, M3, M4, M5, M6, M7, M8, M9,
+    M10>::type
+AllOf(M1 m1, M2 m2, M3 m3, M4 m4, M5 m5, M6 m6, M7 m7, M8 m8, M9 m9, M10 m10) {
+  return typename internal::AllOfResult10<M1, M2, M3, M4, M5, M6, M7, M8, M9,
+      M10>::type(
+      ::testing::AllOf(m1, m2, m3, m4, m5),
+      ::testing::AllOf(m6, m7, m8, m9, m10));
+}
+
+// AnyOf(m1, m2, ..., mk) matches any value that matches any of the given
+// sub-matchers.  AnyOf is called fully qualified to prevent ADL from firing.
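+//
+// For example (illustrative; n is assumed to be an int):
+//
+//   // n must be less than -10 or greater than 10.
+//   EXPECT_THAT(n, ::testing::AnyOf(Lt(-10), Gt(10)));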
+
+template <typename M1, typename M2>
+inline typename internal::AnyOfResult2<M1, M2>::type
+AnyOf(M1 m1, M2 m2) {
+  return typename internal::AnyOfResult2<M1, M2>::type(
+      m1,
+      m2);
+}
+
+template <typename M1, typename M2, typename M3>
+inline typename internal::AnyOfResult3<M1, M2, M3>::type
+AnyOf(M1 m1, M2 m2, M3 m3) {
+  return typename internal::AnyOfResult3<M1, M2, M3>::type(
+      m1,
+      ::testing::AnyOf(m2, m3));
+}
+
+template <typename M1, typename M2, typename M3, typename M4>
+inline typename internal::AnyOfResult4<M1, M2, M3, M4>::type
+AnyOf(M1 m1, M2 m2, M3 m3, M4 m4) {
+  return typename internal::AnyOfResult4<M1, M2, M3, M4>::type(
+      ::testing::AnyOf(m1, m2),
+      ::testing::AnyOf(m3, m4));
+}
+
+template <typename M1, typename M2, typename M3, typename M4, typename M5>
+inline typename internal::AnyOfResult5<M1, M2, M3, M4, M5>::type
+AnyOf(M1 m1, M2 m2, M3 m3, M4 m4, M5 m5) {
+  return typename internal::AnyOfResult5<M1, M2, M3, M4, M5>::type(
+      ::testing::AnyOf(m1, m2),
+      ::testing::AnyOf(m3, m4, m5));
+}
+
+template <typename M1, typename M2, typename M3, typename M4, typename M5,
+    typename M6>
+inline typename internal::AnyOfResult6<M1, M2, M3, M4, M5, M6>::type
+AnyOf(M1 m1, M2 m2, M3 m3, M4 m4, M5 m5, M6 m6) {
+  return typename internal::AnyOfResult6<M1, M2, M3, M4, M5, M6>::type(
+      ::testing::AnyOf(m1, m2, m3),
+      ::testing::AnyOf(m4, m5, m6));
+}
+
+template <typename M1, typename M2, typename M3, typename M4, typename M5,
+    typename M6, typename M7>
+inline typename internal::AnyOfResult7<M1, M2, M3, M4, M5, M6, M7>::type
+AnyOf(M1 m1, M2 m2, M3 m3, M4 m4, M5 m5, M6 m6, M7 m7) {
+  return typename internal::AnyOfResult7<M1, M2, M3, M4, M5, M6, M7>::type(
+      ::testing::AnyOf(m1, m2, m3),
+      ::testing::AnyOf(m4, m5, m6, m7));
+}
+
+template <typename M1, typename M2, typename M3, typename M4, typename M5,
+    typename M6, typename M7, typename M8>
+inline typename internal::AnyOfResult8<M1, M2, M3, M4, M5, M6, M7, M8>::type
+AnyOf(M1 m1, M2 m2, M3 m3, M4 m4, M5 m5, M6 m6, M7 m7, M8 m8) {
+  return typename internal::AnyOfResult8<M1, M2, M3, M4, M5, M6, M7, M8>::type(
+      ::testing::AnyOf(m1, m2, m3, m4),
+      ::testing::AnyOf(m5, m6, m7, m8));
+}
+
+template <typename M1, typename M2, typename M3, typename M4, typename M5,
+    typename M6, typename M7, typename M8, typename M9>
+inline typename internal::AnyOfResult9<M1, M2, M3, M4, M5, M6, M7, M8, M9>::type
+AnyOf(M1 m1, M2 m2, M3 m3, M4 m4, M5 m5, M6 m6, M7 m7, M8 m8, M9 m9) {
+  return typename internal::AnyOfResult9<M1, M2, M3, M4, M5, M6, M7, M8,
+      M9>::type(
+      ::testing::AnyOf(m1, m2, m3, m4),
+      ::testing::AnyOf(m5, m6, m7, m8, m9));
+}
+
+template <typename M1, typename M2, typename M3, typename M4, typename M5,
+    typename M6, typename M7, typename M8, typename M9, typename M10>
+inline typename internal::AnyOfResult10<M1, M2, M3, M4, M5, M6, M7, M8, M9,
+    M10>::type
+AnyOf(M1 m1, M2 m2, M3 m3, M4 m4, M5 m5, M6 m6, M7 m7, M8 m8, M9 m9, M10 m10) {
+  return typename internal::AnyOfResult10<M1, M2, M3, M4, M5, M6, M7, M8, M9,
+      M10>::type(
+      ::testing::AnyOf(m1, m2, m3, m4, m5),
+      ::testing::AnyOf(m6, m7, m8, m9, m10));
+}
+
+}  // namespace testing
+
+
+// The MATCHER* family of macros can be used in a namespace scope to
+// define custom matchers easily.
+//
+// Basic Usage
+// ===========
+//
+// The syntax
+//
+//   MATCHER(name, description_string) { statements; }
+//
+// defines a matcher with the given name that executes the statements,
+// which must return a bool to indicate if the match succeeds.  Inside
+// the statements, you can refer to the value being matched by 'arg',
+// and refer to its type by 'arg_type'.
+//
+// The description string documents what the matcher does, and is used
+// to generate the failure message when the match fails.  Since a
+// MATCHER() is usually defined in a header file shared by multiple
+// C++ source files, we require the description to be a C-string
+// literal to avoid possible side effects.  It can be empty, in which
+// case we'll use the sequence of words in the matcher name as the
+// description.
+//
+// For example:
+//
+//   MATCHER(IsEven, "") { return (arg % 2) == 0; }
+//
+// allows you to write
+//
+//   // Expects mock_foo.Bar(n) to be called where n is even.
+//   EXPECT_CALL(mock_foo, Bar(IsEven()));
+//
+// or,
+//
+//   // Verifies that the value of some_expression is even.
+//   EXPECT_THAT(some_expression, IsEven());
+//
+// If the above assertion fails, it will print something like:
+//
+//   Value of: some_expression
+//   Expected: is even
+//     Actual: 7
+//
+// where the description "is even" is automatically calculated from the
+// matcher name IsEven.
+//
+// Argument Type
+// =============
+//
+// Note that the type of the value being matched (arg_type) is
+// determined by the context in which you use the matcher and is
+// supplied to you by the compiler, so you don't need to worry about
+// declaring it (nor can you).  This allows the matcher to be
+// polymorphic.  For example, IsEven() can be used to match any type
+// where the value of "(arg % 2) == 0" can be implicitly converted to
+// a bool.  In the "Bar(IsEven())" example above, if method Bar()
+// takes an int, 'arg_type' will be int; if it takes an unsigned long,
+// 'arg_type' will be unsigned long; and so on.
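+//
+// For illustration only (a hypothetical matcher name), the same matcher
+// works for any type for which its body compiles, with arg_type deduced
+// from the context of each use:
+//
+//   MATCHER(IsNonNegative, "") { return arg >= 0; }
+//   ...
+//   EXPECT_THAT(3, IsNonNegative());    // arg_type deduced from an int
+//   EXPECT_THAT(2.5, IsNonNegative());  // arg_type deduced from a double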
+//
+// Parameterizing Matchers
+// =======================
+//
+// Sometimes you'll want to parameterize the matcher.  For that you
+// can use another macro:
+//
+//   MATCHER_P(name, param_name, description_string) { statements; }
+//
+// For example:
+//
+//   MATCHER_P(HasAbsoluteValue, value, "") { return abs(arg) == value; }
+//
+// will allow you to write:
+//
+//   EXPECT_THAT(Blah("a"), HasAbsoluteValue(n));
+//
+// which may lead to this message (assuming n is 10):
+//
+//   Value of: Blah("a")
+//   Expected: has absolute value 10
+//     Actual: -9
+//
+// Note that both the matcher description and its parameter are
+// printed, making the message human-friendly.
+//
+// In the matcher definition body, you can write 'foo_type' to
+// reference the type of a parameter named 'foo'.  For example, in the
+// body of MATCHER_P(HasAbsoluteValue, value) above, you can write
+// 'value_type' to refer to the type of 'value'.
+//
+// We also provide MATCHER_P2, MATCHER_P3, ..., up to MATCHER_P10 to
+// support multi-parameter matchers.
+//
+// Describing Parameterized Matchers
+// =================================
+//
+// The last argument to MATCHER*() is a string-typed expression.  The
+// expression can reference all of the matcher's parameters and a
+// special bool-typed variable named 'negation'.  When 'negation' is
+// false, the expression should evaluate to the matcher's description;
+// otherwise it should evaluate to the description of the negation of
+// the matcher.  For example,
+//
+//   using testing::PrintToString;
+//
+//   MATCHER_P2(InClosedRange, low, hi,
+//       string(negation ? "is not" : "is") + " in range [" +
+//       PrintToString(low) + ", " + PrintToString(hi) + "]") {
+//     return low <= arg && arg <= hi;
+//   }
+//   ...
+//   EXPECT_THAT(3, InClosedRange(4, 6));
+//   EXPECT_THAT(3, Not(InClosedRange(2, 4)));
+//
+// would generate two failures that contain the text:
+//
+//   Expected: is in range [4, 6]
+//   ...
+//   Expected: is not in range [2, 4]
+//
+// If you specify "" as the description, the failure message will
+// contain the sequence of words in the matcher name followed by the
+// parameter values printed as a tuple.  For example,
+//
+//   MATCHER_P2(InClosedRange, low, hi, "") { ... }
+//   ...
+//   EXPECT_THAT(3, InClosedRange(4, 6));
+//   EXPECT_THAT(3, Not(InClosedRange(2, 4)));
+//
+// would generate two failures that contain the text:
+//
+//   Expected: in closed range (4, 6)
+//   ...
+//   Expected: not (in closed range (2, 4))
+//
+// Types of Matcher Parameters
+// ===========================
+//
+// For the purpose of typing, you can view
+//
+//   MATCHER_Pk(Foo, p1, ..., pk, description_string) { ... }
+//
+// as shorthand for
+//
+//   template <typename p1_type, ..., typename pk_type>
+//   FooMatcherPk<p1_type, ..., pk_type>
+//   Foo(p1_type p1, ..., pk_type pk) { ... }
+//
+// When you write Foo(v1, ..., vk), the compiler infers the types of
+// the parameters v1, ..., and vk for you.  If you are not happy with
+// the result of the type inference, you can specify the types by
+// explicitly instantiating the template, as in Foo<long, bool>(5,
+// false).  As said earlier, you don't get to (or need to) specify
+// 'arg_type' as that's determined by the context in which the matcher
+// is used.  You can assign the result of expression Foo(p1, ..., pk)
+// to a variable of type FooMatcherPk<p1_type, ..., pk_type>.  This
+// can be useful when composing matchers.
+//
+// While you can instantiate a matcher template with reference types,
+// passing the parameters by pointer usually makes your code more
+// readable.  If, however, you still want to pass a parameter by
+// reference, be aware that in the failure message generated by the
+// matcher you will see the value of the referenced object but not its
+// address.
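+//
+// For illustration only (a hypothetical matcher name), explicit
+// instantiation and the generated FooMatcherPk type look like this:
+//
+//   MATCHER_P2(IsBetween, low, hi, "") { return low <= arg && arg <= hi; }
+//   ...
+//   // Force 'long' parameters and store the matcher in a named variable.
+//   IsBetweenMatcherP2<long, long> m = IsBetween<long, long>(5, 10);
+//   EXPECT_THAT(7, m);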
+//
+// Explaining Match Results
+// ========================
+//
+// Sometimes the matcher description alone isn't enough to explain why
+// the match has failed or succeeded.  For example, when expecting a
+// long string, it can be very helpful to also print the diff between
+// the expected string and the actual one.  To achieve that, you can
+// optionally stream additional information to a special variable
+// named result_listener, whose type is a pointer to class
+// MatchResultListener:
+//
+//   MATCHER_P(EqualsLongString, str, "") {
+//     if (arg == str) return true;
+//
+//     *result_listener << "the difference: "
+//                      << DiffStrings(str, arg);
+//     return false;
+//   }
+//
+// Overloading Matchers
+// ====================
+//
+// You can overload matchers with different numbers of parameters:
+//
+//   MATCHER_P(Blah, a, description_string1) { ... }
+//   MATCHER_P2(Blah, a, b, description_string2) { ... }
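+//
+// For illustration only (a hypothetical matcher name), the overload is
+// selected by the number of arguments at the call site:
+//
+//   MATCHER_P(IsInRange, hi, "") { return 0 <= arg && arg <= hi; }
+//   MATCHER_P2(IsInRange, low, hi, "") { return low <= arg && arg <= hi; }
+//   ...
+//   EXPECT_THAT(5, IsInRange(10));     // uses the one-parameter overload
+//   EXPECT_THAT(5, IsInRange(2, 10));  // uses the two-parameter overload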
+//
+// Caveats
+// =======
+//
+// When defining a new matcher, you should also consider implementing
+// MatcherInterface or using MakePolymorphicMatcher().  These
+// approaches require more work than the MATCHER* macros, but also
+// give you more control over the types of the value being matched and
+// the matcher parameters, which may lead to better compiler error
+// messages when the matcher is used incorrectly.  They also allow
+// overloading matchers based on parameter types (as opposed to just
+// based on the number of parameters).
+//
+// MATCHER*() can only be used in a namespace scope.  The reason is
+// that C++ doesn't yet allow function-local types to be used to
+// instantiate templates.  The upcoming C++0x standard will fix this.
+// Once that's done, we'll consider supporting the use of MATCHER*()
+// inside a function.
+//
+// More Information
+// ================
+//
+// To learn more about using these macros, please search for 'MATCHER'
+// on http://code.google.com/p/googlemock/wiki/CookBook.
+
+#define MATCHER(name, description)\
+  class name##Matcher {\
+   public:\
+    template <typename arg_type>\
+    class gmock_Impl : public ::testing::MatcherInterface<arg_type> {\
+     public:\
+      gmock_Impl()\
+           {}\
+      virtual bool MatchAndExplain(\
+          arg_type arg, ::testing::MatchResultListener* result_listener) const;\
+      virtual void DescribeTo(::std::ostream* gmock_os) const {\
+        *gmock_os << FormatDescription(false);\
+      }\
+      virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\
+        *gmock_os << FormatDescription(true);\
+      }\
+     private:\
+      ::testing::internal::string FormatDescription(bool negation) const {\
+        const ::testing::internal::string gmock_description = (description);\
+        if (!gmock_description.empty())\
+          return gmock_description;\
+        return ::testing::internal::FormatMatcherDescription(\
+            negation, #name, \
+            ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\
+                ::testing::tuple<>()));\
+      }\
+      GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+    };\
+    template <typename arg_type>\
+    operator ::testing::Matcher<arg_type>() const {\
+      return ::testing::Matcher<arg_type>(\
+          new gmock_Impl<arg_type>());\
+    }\
+    name##Matcher() {\
+    }\
+   private:\
+    GTEST_DISALLOW_ASSIGN_(name##Matcher);\
+  };\
+  inline name##Matcher name() {\
+    return name##Matcher();\
+  }\
+  template <typename arg_type>\
+  bool name##Matcher::gmock_Impl<arg_type>::MatchAndExplain(\
+      arg_type arg, \
+      ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\
+          const
+
+#define MATCHER_P(name, p0, description)\
+  template <typename p0##_type>\
+  class name##MatcherP {\
+   public:\
+    template <typename arg_type>\
+    class gmock_Impl : public ::testing::MatcherInterface<arg_type> {\
+     public:\
+      explicit gmock_Impl(p0##_type gmock_p0)\
+           : p0(gmock_p0) {}\
+      virtual bool MatchAndExplain(\
+          arg_type arg, ::testing::MatchResultListener* result_listener) const;\
+      virtual void DescribeTo(::std::ostream* gmock_os) const {\
+        *gmock_os << FormatDescription(false);\
+      }\
+      virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\
+        *gmock_os << FormatDescription(true);\
+      }\
+      p0##_type p0;\
+     private:\
+      ::testing::internal::string FormatDescription(bool negation) const {\
+        const ::testing::internal::string gmock_description = (description);\
+        if (!gmock_description.empty())\
+          return gmock_description;\
+        return ::testing::internal::FormatMatcherDescription(\
+            negation, #name, \
+            ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\
+                ::testing::tuple<p0##_type>(p0)));\
+      }\
+      GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+    };\
+    template <typename arg_type>\
+    operator ::testing::Matcher<arg_type>() const {\
+      return ::testing::Matcher<arg_type>(\
+          new gmock_Impl<arg_type>(p0));\
+    }\
+    explicit name##MatcherP(p0##_type gmock_p0) : p0(gmock_p0) {\
+    }\
+    p0##_type p0;\
+   private:\
+    GTEST_DISALLOW_ASSIGN_(name##MatcherP);\
+  };\
+  template <typename p0##_type>\
+  inline name##MatcherP<p0##_type> name(p0##_type p0) {\
+    return name##MatcherP<p0##_type>(p0);\
+  }\
+  template <typename p0##_type>\
+  template <typename arg_type>\
+  bool name##MatcherP<p0##_type>::gmock_Impl<arg_type>::MatchAndExplain(\
+      arg_type arg, \
+      ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\
+          const
+
+#define MATCHER_P2(name, p0, p1, description)\
+  template <typename p0##_type, typename p1##_type>\
+  class name##MatcherP2 {\
+   public:\
+    template <typename arg_type>\
+    class gmock_Impl : public ::testing::MatcherInterface<arg_type> {\
+     public:\
+      gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1)\
+           : p0(gmock_p0), p1(gmock_p1) {}\
+      virtual bool MatchAndExplain(\
+          arg_type arg, ::testing::MatchResultListener* result_listener) const;\
+      virtual void DescribeTo(::std::ostream* gmock_os) const {\
+        *gmock_os << FormatDescription(false);\
+      }\
+      virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\
+        *gmock_os << FormatDescription(true);\
+      }\
+      p0##_type p0;\
+      p1##_type p1;\
+     private:\
+      ::testing::internal::string FormatDescription(bool negation) const {\
+        const ::testing::internal::string gmock_description = (description);\
+        if (!gmock_description.empty())\
+          return gmock_description;\
+        return ::testing::internal::FormatMatcherDescription(\
+            negation, #name, \
+            ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\
+                ::testing::tuple<p0##_type, p1##_type>(p0, p1)));\
+      }\
+      GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+    };\
+    template <typename arg_type>\
+    operator ::testing::Matcher<arg_type>() const {\
+      return ::testing::Matcher<arg_type>(\
+          new gmock_Impl<arg_type>(p0, p1));\
+    }\
+    name##MatcherP2(p0##_type gmock_p0, p1##_type gmock_p1) : p0(gmock_p0), \
+        p1(gmock_p1) {\
+    }\
+    p0##_type p0;\
+    p1##_type p1;\
+   private:\
+    GTEST_DISALLOW_ASSIGN_(name##MatcherP2);\
+  };\
+  template <typename p0##_type, typename p1##_type>\
+  inline name##MatcherP2<p0##_type, p1##_type> name(p0##_type p0, \
+      p1##_type p1) {\
+    return name##MatcherP2<p0##_type, p1##_type>(p0, p1);\
+  }\
+  template <typename p0##_type, typename p1##_type>\
+  template <typename arg_type>\
+  bool name##MatcherP2<p0##_type, \
+      p1##_type>::gmock_Impl<arg_type>::MatchAndExplain(\
+      arg_type arg, \
+      ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\
+          const
+
+#define MATCHER_P3(name, p0, p1, p2, description)\
+  template <typename p0##_type, typename p1##_type, typename p2##_type>\
+  class name##MatcherP3 {\
+   public:\
+    template <typename arg_type>\
+    class gmock_Impl : public ::testing::MatcherInterface<arg_type> {\
+     public:\
+      gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2)\
+           : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2) {}\
+      virtual bool MatchAndExplain(\
+          arg_type arg, ::testing::MatchResultListener* result_listener) const;\
+      virtual void DescribeTo(::std::ostream* gmock_os) const {\
+        *gmock_os << FormatDescription(false);\
+      }\
+      virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\
+        *gmock_os << FormatDescription(true);\
+      }\
+      p0##_type p0;\
+      p1##_type p1;\
+      p2##_type p2;\
+     private:\
+      ::testing::internal::string FormatDescription(bool negation) const {\
+        const ::testing::internal::string gmock_description = (description);\
+        if (!gmock_description.empty())\
+          return gmock_description;\
+        return ::testing::internal::FormatMatcherDescription(\
+            negation, #name, \
+            ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\
+                ::testing::tuple<p0##_type, p1##_type, p2##_type>(p0, p1, \
+                    p2)));\
+      }\
+      GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+    };\
+    template <typename arg_type>\
+    operator ::testing::Matcher<arg_type>() const {\
+      return ::testing::Matcher<arg_type>(\
+          new gmock_Impl<arg_type>(p0, p1, p2));\
+    }\
+    name##MatcherP3(p0##_type gmock_p0, p1##_type gmock_p1, \
+        p2##_type gmock_p2) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2) {\
+    }\
+    p0##_type p0;\
+    p1##_type p1;\
+    p2##_type p2;\
+   private:\
+    GTEST_DISALLOW_ASSIGN_(name##MatcherP3);\
+  };\
+  template <typename p0##_type, typename p1##_type, typename p2##_type>\
+  inline name##MatcherP3<p0##_type, p1##_type, p2##_type> name(p0##_type p0, \
+      p1##_type p1, p2##_type p2) {\
+    return name##MatcherP3<p0##_type, p1##_type, p2##_type>(p0, p1, p2);\
+  }\
+  template <typename p0##_type, typename p1##_type, typename p2##_type>\
+  template <typename arg_type>\
+  bool name##MatcherP3<p0##_type, p1##_type, \
+      p2##_type>::gmock_Impl<arg_type>::MatchAndExplain(\
+      arg_type arg, \
+      ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\
+          const
+
+#define MATCHER_P4(name, p0, p1, p2, p3, description)\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type>\
+  class name##MatcherP4 {\
+   public:\
+    template <typename arg_type>\
+    class gmock_Impl : public ::testing::MatcherInterface<arg_type> {\
+     public:\
+      gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+          p3##_type gmock_p3)\
+           : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), p3(gmock_p3) {}\
+      virtual bool MatchAndExplain(\
+          arg_type arg, ::testing::MatchResultListener* result_listener) const;\
+      virtual void DescribeTo(::std::ostream* gmock_os) const {\
+        *gmock_os << FormatDescription(false);\
+      }\
+      virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\
+        *gmock_os << FormatDescription(true);\
+      }\
+      p0##_type p0;\
+      p1##_type p1;\
+      p2##_type p2;\
+      p3##_type p3;\
+     private:\
+      ::testing::internal::string FormatDescription(bool negation) const {\
+        const ::testing::internal::string gmock_description = (description);\
+        if (!gmock_description.empty())\
+          return gmock_description;\
+        return ::testing::internal::FormatMatcherDescription(\
+            negation, #name, \
+            ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\
+                ::testing::tuple<p0##_type, p1##_type, p2##_type, \
+                    p3##_type>(p0, p1, p2, p3)));\
+      }\
+      GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+    };\
+    template <typename arg_type>\
+    operator ::testing::Matcher<arg_type>() const {\
+      return ::testing::Matcher<arg_type>(\
+          new gmock_Impl<arg_type>(p0, p1, p2, p3));\
+    }\
+    name##MatcherP4(p0##_type gmock_p0, p1##_type gmock_p1, \
+        p2##_type gmock_p2, p3##_type gmock_p3) : p0(gmock_p0), p1(gmock_p1), \
+        p2(gmock_p2), p3(gmock_p3) {\
+    }\
+    p0##_type p0;\
+    p1##_type p1;\
+    p2##_type p2;\
+    p3##_type p3;\
+   private:\
+    GTEST_DISALLOW_ASSIGN_(name##MatcherP4);\
+  };\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type>\
+  inline name##MatcherP4<p0##_type, p1##_type, p2##_type, \
+      p3##_type> name(p0##_type p0, p1##_type p1, p2##_type p2, \
+      p3##_type p3) {\
+    return name##MatcherP4<p0##_type, p1##_type, p2##_type, p3##_type>(p0, \
+        p1, p2, p3);\
+  }\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type>\
+  template <typename arg_type>\
+  bool name##MatcherP4<p0##_type, p1##_type, p2##_type, \
+      p3##_type>::gmock_Impl<arg_type>::MatchAndExplain(\
+      arg_type arg, \
+      ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\
+          const
+
+#define MATCHER_P5(name, p0, p1, p2, p3, p4, description)\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type, typename p4##_type>\
+  class name##MatcherP5 {\
+   public:\
+    template <typename arg_type>\
+    class gmock_Impl : public ::testing::MatcherInterface<arg_type> {\
+     public:\
+      gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+          p3##_type gmock_p3, p4##_type gmock_p4)\
+           : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), p3(gmock_p3), \
+               p4(gmock_p4) {}\
+      virtual bool MatchAndExplain(\
+          arg_type arg, ::testing::MatchResultListener* result_listener) const;\
+      virtual void DescribeTo(::std::ostream* gmock_os) const {\
+        *gmock_os << FormatDescription(false);\
+      }\
+      virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\
+        *gmock_os << FormatDescription(true);\
+      }\
+      p0##_type p0;\
+      p1##_type p1;\
+      p2##_type p2;\
+      p3##_type p3;\
+      p4##_type p4;\
+     private:\
+      ::testing::internal::string FormatDescription(bool negation) const {\
+        const ::testing::internal::string gmock_description = (description);\
+        if (!gmock_description.empty())\
+          return gmock_description;\
+        return ::testing::internal::FormatMatcherDescription(\
+            negation, #name, \
+            ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\
+                ::testing::tuple<p0##_type, p1##_type, p2##_type, p3##_type, \
+                    p4##_type>(p0, p1, p2, p3, p4)));\
+      }\
+      GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+    };\
+    template <typename arg_type>\
+    operator ::testing::Matcher<arg_type>() const {\
+      return ::testing::Matcher<arg_type>(\
+          new gmock_Impl<arg_type>(p0, p1, p2, p3, p4));\
+    }\
+    name##MatcherP5(p0##_type gmock_p0, p1##_type gmock_p1, \
+        p2##_type gmock_p2, p3##_type gmock_p3, \
+        p4##_type gmock_p4) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \
+        p3(gmock_p3), p4(gmock_p4) {\
+    }\
+    p0##_type p0;\
+    p1##_type p1;\
+    p2##_type p2;\
+    p3##_type p3;\
+    p4##_type p4;\
+   private:\
+    GTEST_DISALLOW_ASSIGN_(name##MatcherP5);\
+  };\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type, typename p4##_type>\
+  inline name##MatcherP5<p0##_type, p1##_type, p2##_type, p3##_type, \
+      p4##_type> name(p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, \
+      p4##_type p4) {\
+    return name##MatcherP5<p0##_type, p1##_type, p2##_type, p3##_type, \
+        p4##_type>(p0, p1, p2, p3, p4);\
+  }\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type, typename p4##_type>\
+  template <typename arg_type>\
+  bool name##MatcherP5<p0##_type, p1##_type, p2##_type, p3##_type, \
+      p4##_type>::gmock_Impl<arg_type>::MatchAndExplain(\
+      arg_type arg, \
+      ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\
+          const
+
+#define MATCHER_P6(name, p0, p1, p2, p3, p4, p5, description)\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type, typename p4##_type, typename p5##_type>\
+  class name##MatcherP6 {\
+   public:\
+    template <typename arg_type>\
+    class gmock_Impl : public ::testing::MatcherInterface<arg_type> {\
+     public:\
+      gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+          p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5)\
+           : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), p3(gmock_p3), \
+               p4(gmock_p4), p5(gmock_p5) {}\
+      virtual bool MatchAndExplain(\
+          arg_type arg, ::testing::MatchResultListener* result_listener) const;\
+      virtual void DescribeTo(::std::ostream* gmock_os) const {\
+        *gmock_os << FormatDescription(false);\
+      }\
+      virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\
+        *gmock_os << FormatDescription(true);\
+      }\
+      p0##_type p0;\
+      p1##_type p1;\
+      p2##_type p2;\
+      p3##_type p3;\
+      p4##_type p4;\
+      p5##_type p5;\
+     private:\
+      ::testing::internal::string FormatDescription(bool negation) const {\
+        const ::testing::internal::string gmock_description = (description);\
+        if (!gmock_description.empty())\
+          return gmock_description;\
+        return ::testing::internal::FormatMatcherDescription(\
+            negation, #name, \
+            ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\
+                ::testing::tuple<p0##_type, p1##_type, p2##_type, p3##_type, \
+                    p4##_type, p5##_type>(p0, p1, p2, p3, p4, p5)));\
+      }\
+      GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+    };\
+    template <typename arg_type>\
+    operator ::testing::Matcher<arg_type>() const {\
+      return ::testing::Matcher<arg_type>(\
+          new gmock_Impl<arg_type>(p0, p1, p2, p3, p4, p5));\
+    }\
+    name##MatcherP6(p0##_type gmock_p0, p1##_type gmock_p1, \
+        p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \
+        p5##_type gmock_p5) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \
+        p3(gmock_p3), p4(gmock_p4), p5(gmock_p5) {\
+    }\
+    p0##_type p0;\
+    p1##_type p1;\
+    p2##_type p2;\
+    p3##_type p3;\
+    p4##_type p4;\
+    p5##_type p5;\
+   private:\
+    GTEST_DISALLOW_ASSIGN_(name##MatcherP6);\
+  };\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type, typename p4##_type, typename p5##_type>\
+  inline name##MatcherP6<p0##_type, p1##_type, p2##_type, p3##_type, \
+      p4##_type, p5##_type> name(p0##_type p0, p1##_type p1, p2##_type p2, \
+      p3##_type p3, p4##_type p4, p5##_type p5) {\
+    return name##MatcherP6<p0##_type, p1##_type, p2##_type, p3##_type, \
+        p4##_type, p5##_type>(p0, p1, p2, p3, p4, p5);\
+  }\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type, typename p4##_type, typename p5##_type>\
+  template <typename arg_type>\
+  bool name##MatcherP6<p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, \
+      p5##_type>::gmock_Impl<arg_type>::MatchAndExplain(\
+      arg_type arg, \
+      ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\
+          const
+
+#define MATCHER_P7(name, p0, p1, p2, p3, p4, p5, p6, description)\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type, typename p4##_type, typename p5##_type, \
+      typename p6##_type>\
+  class name##MatcherP7 {\
+   public:\
+    template <typename arg_type>\
+    class gmock_Impl : public ::testing::MatcherInterface<arg_type> {\
+     public:\
+      gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+          p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \
+          p6##_type gmock_p6)\
+           : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), p3(gmock_p3), \
+               p4(gmock_p4), p5(gmock_p5), p6(gmock_p6) {}\
+      virtual bool MatchAndExplain(\
+          arg_type arg, ::testing::MatchResultListener* result_listener) const;\
+      virtual void DescribeTo(::std::ostream* gmock_os) const {\
+        *gmock_os << FormatDescription(false);\
+      }\
+      virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\
+        *gmock_os << FormatDescription(true);\
+      }\
+      p0##_type p0;\
+      p1##_type p1;\
+      p2##_type p2;\
+      p3##_type p3;\
+      p4##_type p4;\
+      p5##_type p5;\
+      p6##_type p6;\
+     private:\
+      ::testing::internal::string FormatDescription(bool negation) const {\
+        const ::testing::internal::string gmock_description = (description);\
+        if (!gmock_description.empty())\
+          return gmock_description;\
+        return ::testing::internal::FormatMatcherDescription(\
+            negation, #name, \
+            ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\
+                ::testing::tuple<p0##_type, p1##_type, p2##_type, p3##_type, \
+                    p4##_type, p5##_type, p6##_type>(p0, p1, p2, p3, p4, p5, \
+                    p6)));\
+      }\
+      GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+    };\
+    template <typename arg_type>\
+    operator ::testing::Matcher<arg_type>() const {\
+      return ::testing::Matcher<arg_type>(\
+          new gmock_Impl<arg_type>(p0, p1, p2, p3, p4, p5, p6));\
+    }\
+    name##MatcherP7(p0##_type gmock_p0, p1##_type gmock_p1, \
+        p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \
+        p5##_type gmock_p5, p6##_type gmock_p6) : p0(gmock_p0), p1(gmock_p1), \
+        p2(gmock_p2), p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), \
+        p6(gmock_p6) {\
+    }\
+    p0##_type p0;\
+    p1##_type p1;\
+    p2##_type p2;\
+    p3##_type p3;\
+    p4##_type p4;\
+    p5##_type p5;\
+    p6##_type p6;\
+   private:\
+    GTEST_DISALLOW_ASSIGN_(name##MatcherP7);\
+  };\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type, typename p4##_type, typename p5##_type, \
+      typename p6##_type>\
+  inline name##MatcherP7<p0##_type, p1##_type, p2##_type, p3##_type, \
+      p4##_type, p5##_type, p6##_type> name(p0##_type p0, p1##_type p1, \
+      p2##_type p2, p3##_type p3, p4##_type p4, p5##_type p5, \
+      p6##_type p6) {\
+    return name##MatcherP7<p0##_type, p1##_type, p2##_type, p3##_type, \
+        p4##_type, p5##_type, p6##_type>(p0, p1, p2, p3, p4, p5, p6);\
+  }\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type, typename p4##_type, typename p5##_type, \
+      typename p6##_type>\
+  template <typename arg_type>\
+  bool name##MatcherP7<p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, \
+      p5##_type, p6##_type>::gmock_Impl<arg_type>::MatchAndExplain(\
+      arg_type arg, \
+      ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\
+          const
+
+#define MATCHER_P8(name, p0, p1, p2, p3, p4, p5, p6, p7, description)\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type, typename p4##_type, typename p5##_type, \
+      typename p6##_type, typename p7##_type>\
+  class name##MatcherP8 {\
+   public:\
+    template <typename arg_type>\
+    class gmock_Impl : public ::testing::MatcherInterface<arg_type> {\
+     public:\
+      gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+          p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \
+          p6##_type gmock_p6, p7##_type gmock_p7)\
+           : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), p3(gmock_p3), \
+               p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), p7(gmock_p7) {}\
+      virtual bool MatchAndExplain(\
+          arg_type arg, ::testing::MatchResultListener* result_listener) const;\
+      virtual void DescribeTo(::std::ostream* gmock_os) const {\
+        *gmock_os << FormatDescription(false);\
+      }\
+      virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\
+        *gmock_os << FormatDescription(true);\
+      }\
+      p0##_type p0;\
+      p1##_type p1;\
+      p2##_type p2;\
+      p3##_type p3;\
+      p4##_type p4;\
+      p5##_type p5;\
+      p6##_type p6;\
+      p7##_type p7;\
+     private:\
+      ::testing::internal::string FormatDescription(bool negation) const {\
+        const ::testing::internal::string gmock_description = (description);\
+        if (!gmock_description.empty())\
+          return gmock_description;\
+        return ::testing::internal::FormatMatcherDescription(\
+            negation, #name, \
+            ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\
+                ::testing::tuple<p0##_type, p1##_type, p2##_type, p3##_type, \
+                    p4##_type, p5##_type, p6##_type, p7##_type>(p0, p1, p2, \
+                    p3, p4, p5, p6, p7)));\
+      }\
+      GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+    };\
+    template <typename arg_type>\
+    operator ::testing::Matcher<arg_type>() const {\
+      return ::testing::Matcher<arg_type>(\
+          new gmock_Impl<arg_type>(p0, p1, p2, p3, p4, p5, p6, p7));\
+    }\
+    name##MatcherP8(p0##_type gmock_p0, p1##_type gmock_p1, \
+        p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \
+        p5##_type gmock_p5, p6##_type gmock_p6, \
+        p7##_type gmock_p7) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \
+        p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), \
+        p7(gmock_p7) {\
+    }\
+    p0##_type p0;\
+    p1##_type p1;\
+    p2##_type p2;\
+    p3##_type p3;\
+    p4##_type p4;\
+    p5##_type p5;\
+    p6##_type p6;\
+    p7##_type p7;\
+   private:\
+    GTEST_DISALLOW_ASSIGN_(name##MatcherP8);\
+  };\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type, typename p4##_type, typename p5##_type, \
+      typename p6##_type, typename p7##_type>\
+  inline name##MatcherP8<p0##_type, p1##_type, p2##_type, p3##_type, \
+      p4##_type, p5##_type, p6##_type, p7##_type> name(p0##_type p0, \
+      p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4, p5##_type p5, \
+      p6##_type p6, p7##_type p7) {\
+    return name##MatcherP8<p0##_type, p1##_type, p2##_type, p3##_type, \
+        p4##_type, p5##_type, p6##_type, p7##_type>(p0, p1, p2, p3, p4, p5, \
+        p6, p7);\
+  }\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type, typename p4##_type, typename p5##_type, \
+      typename p6##_type, typename p7##_type>\
+  template <typename arg_type>\
+  bool name##MatcherP8<p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, \
+      p5##_type, p6##_type, \
+      p7##_type>::gmock_Impl<arg_type>::MatchAndExplain(\
+      arg_type arg, \
+      ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\
+          const
+
+#define MATCHER_P9(name, p0, p1, p2, p3, p4, p5, p6, p7, p8, description)\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type, typename p4##_type, typename p5##_type, \
+      typename p6##_type, typename p7##_type, typename p8##_type>\
+  class name##MatcherP9 {\
+   public:\
+    template <typename arg_type>\
+    class gmock_Impl : public ::testing::MatcherInterface<arg_type> {\
+     public:\
+      gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+          p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \
+          p6##_type gmock_p6, p7##_type gmock_p7, p8##_type gmock_p8)\
+           : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), p3(gmock_p3), \
+               p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), p7(gmock_p7), \
+               p8(gmock_p8) {}\
+      virtual bool MatchAndExplain(\
+          arg_type arg, ::testing::MatchResultListener* result_listener) const;\
+      virtual void DescribeTo(::std::ostream* gmock_os) const {\
+        *gmock_os << FormatDescription(false);\
+      }\
+      virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\
+        *gmock_os << FormatDescription(true);\
+      }\
+      p0##_type p0;\
+      p1##_type p1;\
+      p2##_type p2;\
+      p3##_type p3;\
+      p4##_type p4;\
+      p5##_type p5;\
+      p6##_type p6;\
+      p7##_type p7;\
+      p8##_type p8;\
+     private:\
+      ::testing::internal::string FormatDescription(bool negation) const {\
+        const ::testing::internal::string gmock_description = (description);\
+        if (!gmock_description.empty())\
+          return gmock_description;\
+        return ::testing::internal::FormatMatcherDescription(\
+            negation, #name, \
+            ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\
+                ::testing::tuple<p0##_type, p1##_type, p2##_type, p3##_type, \
+                    p4##_type, p5##_type, p6##_type, p7##_type, \
+                    p8##_type>(p0, p1, p2, p3, p4, p5, p6, p7, p8)));\
+      }\
+      GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+    };\
+    template <typename arg_type>\
+    operator ::testing::Matcher<arg_type>() const {\
+      return ::testing::Matcher<arg_type>(\
+          new gmock_Impl<arg_type>(p0, p1, p2, p3, p4, p5, p6, p7, p8));\
+    }\
+    name##MatcherP9(p0##_type gmock_p0, p1##_type gmock_p1, \
+        p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \
+        p5##_type gmock_p5, p6##_type gmock_p6, p7##_type gmock_p7, \
+        p8##_type gmock_p8) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \
+        p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), p7(gmock_p7), \
+        p8(gmock_p8) {\
+    }\
+    p0##_type p0;\
+    p1##_type p1;\
+    p2##_type p2;\
+    p3##_type p3;\
+    p4##_type p4;\
+    p5##_type p5;\
+    p6##_type p6;\
+    p7##_type p7;\
+    p8##_type p8;\
+   private:\
+    GTEST_DISALLOW_ASSIGN_(name##MatcherP9);\
+  };\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type, typename p4##_type, typename p5##_type, \
+      typename p6##_type, typename p7##_type, typename p8##_type>\
+  inline name##MatcherP9<p0##_type, p1##_type, p2##_type, p3##_type, \
+      p4##_type, p5##_type, p6##_type, p7##_type, \
+      p8##_type> name(p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, \
+      p4##_type p4, p5##_type p5, p6##_type p6, p7##_type p7, \
+      p8##_type p8) {\
+    return name##MatcherP9<p0##_type, p1##_type, p2##_type, p3##_type, \
+        p4##_type, p5##_type, p6##_type, p7##_type, p8##_type>(p0, p1, p2, \
+        p3, p4, p5, p6, p7, p8);\
+  }\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type, typename p4##_type, typename p5##_type, \
+      typename p6##_type, typename p7##_type, typename p8##_type>\
+  template <typename arg_type>\
+  bool name##MatcherP9<p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, \
+      p5##_type, p6##_type, p7##_type, \
+      p8##_type>::gmock_Impl<arg_type>::MatchAndExplain(\
+      arg_type arg, \
+      ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\
+          const
+
+#define MATCHER_P10(name, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, description)\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type, typename p4##_type, typename p5##_type, \
+      typename p6##_type, typename p7##_type, typename p8##_type, \
+      typename p9##_type>\
+  class name##MatcherP10 {\
+   public:\
+    template <typename arg_type>\
+    class gmock_Impl : public ::testing::MatcherInterface<arg_type> {\
+     public:\
+      gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+          p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \
+          p6##_type gmock_p6, p7##_type gmock_p7, p8##_type gmock_p8, \
+          p9##_type gmock_p9)\
+           : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), p3(gmock_p3), \
+               p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), p7(gmock_p7), \
+               p8(gmock_p8), p9(gmock_p9) {}\
+      virtual bool MatchAndExplain(\
+          arg_type arg, ::testing::MatchResultListener* result_listener) const;\
+      virtual void DescribeTo(::std::ostream* gmock_os) const {\
+        *gmock_os << FormatDescription(false);\
+      }\
+      virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\
+        *gmock_os << FormatDescription(true);\
+      }\
+      p0##_type p0;\
+      p1##_type p1;\
+      p2##_type p2;\
+      p3##_type p3;\
+      p4##_type p4;\
+      p5##_type p5;\
+      p6##_type p6;\
+      p7##_type p7;\
+      p8##_type p8;\
+      p9##_type p9;\
+     private:\
+      ::testing::internal::string FormatDescription(bool negation) const {\
+        const ::testing::internal::string gmock_description = (description);\
+        if (!gmock_description.empty())\
+          return gmock_description;\
+        return ::testing::internal::FormatMatcherDescription(\
+            negation, #name, \
+            ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\
+                ::testing::tuple<p0##_type, p1##_type, p2##_type, p3##_type, \
+                    p4##_type, p5##_type, p6##_type, p7##_type, p8##_type, \
+                    p9##_type>(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9)));\
+      }\
+      GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+    };\
+    template <typename arg_type>\
+    operator ::testing::Matcher<arg_type>() const {\
+      return ::testing::Matcher<arg_type>(\
+          new gmock_Impl<arg_type>(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9));\
+    }\
+    name##MatcherP10(p0##_type gmock_p0, p1##_type gmock_p1, \
+        p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \
+        p5##_type gmock_p5, p6##_type gmock_p6, p7##_type gmock_p7, \
+        p8##_type gmock_p8, p9##_type gmock_p9) : p0(gmock_p0), p1(gmock_p1), \
+        p2(gmock_p2), p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), \
+        p7(gmock_p7), p8(gmock_p8), p9(gmock_p9) {\
+    }\
+    p0##_type p0;\
+    p1##_type p1;\
+    p2##_type p2;\
+    p3##_type p3;\
+    p4##_type p4;\
+    p5##_type p5;\
+    p6##_type p6;\
+    p7##_type p7;\
+    p8##_type p8;\
+    p9##_type p9;\
+   private:\
+    GTEST_DISALLOW_ASSIGN_(name##MatcherP10);\
+  };\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type, typename p4##_type, typename p5##_type, \
+      typename p6##_type, typename p7##_type, typename p8##_type, \
+      typename p9##_type>\
+  inline name##MatcherP10<p0##_type, p1##_type, p2##_type, p3##_type, \
+      p4##_type, p5##_type, p6##_type, p7##_type, p8##_type, \
+      p9##_type> name(p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, \
+      p4##_type p4, p5##_type p5, p6##_type p6, p7##_type p7, p8##_type p8, \
+      p9##_type p9) {\
+    return name##MatcherP10<p0##_type, p1##_type, p2##_type, p3##_type, \
+        p4##_type, p5##_type, p6##_type, p7##_type, p8##_type, p9##_type>(p0, \
+        p1, p2, p3, p4, p5, p6, p7, p8, p9);\
+  }\
+  template <typename p0##_type, typename p1##_type, typename p2##_type, \
+      typename p3##_type, typename p4##_type, typename p5##_type, \
+      typename p6##_type, typename p7##_type, typename p8##_type, \
+      typename p9##_type>\
+  template <typename arg_type>\
+  bool name##MatcherP10<p0##_type, p1##_type, p2##_type, p3##_type, \
+      p4##_type, p5##_type, p6##_type, p7##_type, p8##_type, \
+      p9##_type>::gmock_Impl<arg_type>::MatchAndExplain(\
+      arg_type arg, \
+      ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\
+          const
+
+#endif  // GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_MATCHERS_H_
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Mock - a framework for writing C++ mock classes.
+//
+// This file implements some actions that depend on gmock-generated-actions.h.
+
+#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_MORE_ACTIONS_H_
+#define GMOCK_INCLUDE_GMOCK_GMOCK_MORE_ACTIONS_H_
+
+#include <algorithm>
+
+
+namespace testing {
+namespace internal {
+
+// Implements the Invoke(f) action.  The template argument
+// FunctionImpl is the implementation type of f, which can be either a
+// function pointer or a functor.  Invoke(f) can be used as an
+// Action<F> as long as f's type is compatible with F (i.e. f can be
+// assigned to a tr1::function<F>).
+template <typename FunctionImpl>
+class InvokeAction {
+ public:
+  // The c'tor makes a copy of function_impl (either a function
+  // pointer or a functor).
+  explicit InvokeAction(FunctionImpl function_impl)
+      : function_impl_(function_impl) {}
+
+  template <typename Result, typename ArgumentTuple>
+  Result Perform(const ArgumentTuple& args) {
+    return InvokeHelper<Result, ArgumentTuple>::Invoke(function_impl_, args);
+  }
+
+ private:
+  FunctionImpl function_impl_;
+
+  GTEST_DISALLOW_ASSIGN_(InvokeAction);
+};
+
+// Implements the Invoke(object_ptr, &Class::Method) action.
+template <class Class, typename MethodPtr>
+class InvokeMethodAction {
+ public:
+  InvokeMethodAction(Class* obj_ptr, MethodPtr method_ptr)
+      : method_ptr_(method_ptr), obj_ptr_(obj_ptr) {}
+
+  template <typename Result, typename ArgumentTuple>
+  Result Perform(const ArgumentTuple& args) const {
+    return InvokeHelper<Result, ArgumentTuple>::InvokeMethod(
+        obj_ptr_, method_ptr_, args);
+  }
+
+ private:
+  // The order of these members matters.  Reversing the order can trigger
+  // warning C4121 in MSVC (see
+  // http://computer-programming-forum.com/7-vc.net/6fbc30265f860ad1.htm ).
+  const MethodPtr method_ptr_;
+  Class* const obj_ptr_;
+
+  GTEST_DISALLOW_ASSIGN_(InvokeMethodAction);
+};
+
+// An internal replacement for std::copy which mimics its behavior. This is
+// necessary because Visual Studio deprecates ::std::copy, issuing warning 4996.
+// However, Visual Studio 2010 and later do not honor #pragmas which disable that
+// warning.
+template<typename InputIterator, typename OutputIterator>
+inline OutputIterator CopyElements(InputIterator first,
+                                   InputIterator last,
+                                   OutputIterator output) {
+  for (; first != last; ++first, ++output) {
+    *output = *first;
+  }
+  return output;
+}
+
+}  // namespace internal
+
+// Various overloads for Invoke().
+
+// Creates an action that invokes 'function_impl' with the mock
+// function's arguments.
+template <typename FunctionImpl>
+PolymorphicAction<internal::InvokeAction<FunctionImpl> > Invoke(
+    FunctionImpl function_impl) {
+  return MakePolymorphicAction(
+      internal::InvokeAction<FunctionImpl>(function_impl));
+}
+
+// Creates an action that invokes the given method on the given object
+// with the mock function's arguments.
+template <class Class, typename MethodPtr>
+PolymorphicAction<internal::InvokeMethodAction<Class, MethodPtr> > Invoke(
+    Class* obj_ptr, MethodPtr method_ptr) {
+  return MakePolymorphicAction(
+      internal::InvokeMethodAction<Class, MethodPtr>(obj_ptr, method_ptr));
+}
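+
+// For illustration only (hypothetical mock method Measure(double, double),
+// free function Distance(double, double), and helper class Calculator,
+// all returning double):
+//
+//   EXPECT_CALL(mock_geo, Measure(_, _))
+//       .WillOnce(Invoke(Distance));                     // free function
+//   Calculator calc;
+//   EXPECT_CALL(mock_geo, Measure(_, _))
+//       .WillOnce(Invoke(&calc, &Calculator::Measure));  // member function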
+
+// WithoutArgs(inner_action) can be used in a mock function with a
+// non-empty argument list to perform inner_action, which takes no
+// argument.  In other words, it adapts an action accepting no
+// argument to one that accepts (and ignores) arguments.
+template <typename InnerAction>
+inline internal::WithArgsAction<InnerAction>
+WithoutArgs(const InnerAction& action) {
+  return internal::WithArgsAction<InnerAction>(action);
+}
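+
+// For illustration only (hypothetical void mock method Notify and
+// no-argument helper function FlushLogs, also returning void):
+//
+//   EXPECT_CALL(mock, Notify(_, _))
+//       .WillOnce(WithoutArgs(Invoke(&FlushLogs)));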
+
+// WithArg<k>(an_action) creates an action that passes the k-th
+// (0-based) argument of the mock function to an_action and performs
+// it.  It adapts an action accepting one argument to one that accepts
+// multiple arguments.  For convenience, we also provide
+// WithArgs<k>(an_action) (defined below) as a synonym.
+template <int k, typename InnerAction>
+inline internal::WithArgsAction<InnerAction, k>
+WithArg(const InnerAction& action) {
+  return internal::WithArgsAction<InnerAction, k>(action);
+}
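+
+// For illustration only (hypothetical void mock method Append and a
+// void callback Normalize that accepts Append's second argument):
+//
+//   EXPECT_CALL(mock, Append(_, _))
+//       .WillOnce(WithArg<1>(Invoke(Normalize)));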
+
+// The ACTION*() macros trigger warning C4100 (unreferenced formal
+// parameter) in MSVC with -W4.  Unfortunately they cannot be fixed in
+// the macro definition, as the warnings are generated when the macro
+// is expanded and macro expansion cannot contain #pragma.  Therefore
+// we suppress them here.
+#ifdef _MSC_VER
+# pragma warning(push)
+# pragma warning(disable:4100)
+#endif
+
+// Action ReturnArg<k>() returns the k-th argument of the mock function.
+ACTION_TEMPLATE(ReturnArg,
+                HAS_1_TEMPLATE_PARAMS(int, k),
+                AND_0_VALUE_PARAMS()) {
+  return ::testing::get<k>(args);
+}
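+
+// For illustration only (hypothetical mock method Echo whose return type
+// matches its first argument):
+//
+//   EXPECT_CALL(mock, Echo(_)).WillRepeatedly(ReturnArg<0>());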
+
+// Action SaveArg<k>(pointer) saves the k-th (0-based) argument of the
+// mock function to *pointer.
+ACTION_TEMPLATE(SaveArg,
+                HAS_1_TEMPLATE_PARAMS(int, k),
+                AND_1_VALUE_PARAMS(pointer)) {
+  *pointer = ::testing::get<k>(args);
+}
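+
+// For illustration only (hypothetical mock method Store(int)):
+//
+//   int captured = 0;
+//   EXPECT_CALL(mock, Store(_)).WillOnce(SaveArg<0>(&captured));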
+
+// Action SaveArgPointee<k>(pointer) saves the value pointed to
+// by the k-th (0-based) argument of the mock function to *pointer.
+ACTION_TEMPLATE(SaveArgPointee,
+                HAS_1_TEMPLATE_PARAMS(int, k),
+                AND_1_VALUE_PARAMS(pointer)) {
+  *pointer = *::testing::get<k>(args);
+}
+
+// Action SetArgReferee<k>(value) assigns 'value' to the variable
+// referenced by the k-th (0-based) argument of the mock function.
+ACTION_TEMPLATE(SetArgReferee,
+                HAS_1_TEMPLATE_PARAMS(int, k),
+                AND_1_VALUE_PARAMS(value)) {
+  typedef typename ::testing::tuple_element<k, args_type>::type argk_type;
+  // Ensures that argument #k is a reference.  If you get a compiler
+  // error on the next line, you are using SetArgReferee<k>(value) in
+  // a mock function whose k-th (0-based) argument is not a reference.
+  GTEST_COMPILE_ASSERT_(internal::is_reference<argk_type>::value,
+                        SetArgReferee_must_be_used_with_a_reference_argument);
+  ::testing::get<k>(args) = value;
+}
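+
+// For illustration only (hypothetical mock method Fill(int&)):
+//
+//   EXPECT_CALL(mock, Fill(_)).WillOnce(SetArgReferee<0>(42));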
+
+// Action SetArrayArgument<k>(first, last) copies the elements in
+// source range [first, last) to the array pointed to by the k-th
+// (0-based) argument, which can be either a pointer or an
+// iterator. The action does not take ownership of the elements in the
+// source range.
+ACTION_TEMPLATE(SetArrayArgument,
+                HAS_1_TEMPLATE_PARAMS(int, k),
+                AND_2_VALUE_PARAMS(first, last)) {
+  // Visual Studio deprecates ::std::copy, so we use our own copy in that case.
+#ifdef _MSC_VER
+  internal::CopyElements(first, last, ::testing::get<k>(args));
+#else
+  ::std::copy(first, last, ::testing::get<k>(args));
+#endif
+}
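+
+// For illustration only (hypothetical mock method Read(int*, int)):
+//
+//   const int values[] = {1, 2, 3};
+//   EXPECT_CALL(mock, Read(_, _))
+//       .WillOnce(SetArrayArgument<0>(values, values + 3));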
+
+// Action DeleteArg<k>() deletes the k-th (0-based) argument of the mock
+// function.
+ACTION_TEMPLATE(DeleteArg,
+                HAS_1_TEMPLATE_PARAMS(int, k),
+                AND_0_VALUE_PARAMS()) {
+  delete ::testing::get<k>(args);
+}
+
+// This action returns the value pointed to by 'pointer'.
+ACTION_P(ReturnPointee, pointer) { return *pointer; }
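+
+// For illustration only (hypothetical mock method GetCount() returning int):
+//
+//   int counter = 0;
+//   EXPECT_CALL(mock, GetCount()).WillRepeatedly(ReturnPointee(&counter));
+//   // Subsequent calls observe later changes to 'counter'.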
+
+// Action Throw(exception) can be used in a mock function of any type
+// to throw the given exception.  Any copyable value can be thrown.
+#if GTEST_HAS_EXCEPTIONS
+
+// Suppresses the 'unreachable code' warning that VC generates in opt modes.
+# ifdef _MSC_VER
+#  pragma warning(push)          // Saves the current warning state.
+#  pragma warning(disable:4702)  // Temporarily disables warning 4702.
+# endif
+ACTION_P(Throw, exception) { throw exception; }
+# ifdef _MSC_VER
+#  pragma warning(pop)           // Restores the warning state.
+# endif
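+
+// For illustration only (hypothetical mock method Open; assumes
+// <stdexcept> is available):
+//
+//   EXPECT_CALL(mock, Open(_))
+//       .WillOnce(Throw(std::runtime_error("disk full")));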
+
+#endif  // GTEST_HAS_EXCEPTIONS
+
+#ifdef _MSC_VER
+# pragma warning(pop)
+#endif
+
+}  // namespace testing
+
+#endif  // GMOCK_INCLUDE_GMOCK_GMOCK_MORE_ACTIONS_H_
+// Copyright 2013, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: marcus.boerger@google.com (Marcus Boerger)
+
+// Google Mock - a framework for writing C++ mock classes.
+//
+// This file implements some matchers that depend on gmock-generated-matchers.h.
+//
+// Note that tests are implemented in gmock-matchers_test.cc rather than
+// gmock-more-matchers-test.cc.
+
+#ifndef GMOCK_GMOCK_MORE_MATCHERS_H_
+#define GMOCK_GMOCK_MORE_MATCHERS_H_
+
+
+namespace testing {
+
+// Defines a matcher that matches an empty container. The container must
+// support both size() and empty(), which all STL-like containers provide.
+MATCHER(IsEmpty, negation ? "isn't empty" : "is empty") {
+  if (arg.empty()) {
+    return true;
+  }
+  *result_listener << "whose size is " << arg.size();
+  return false;
+}
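+
+// For illustration only, e.g. with a std::vector:
+//
+//   std::vector<int> v;
+//   EXPECT_THAT(v, IsEmpty());
+//   v.push_back(1);
+//   EXPECT_THAT(v, Not(IsEmpty()));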
+
+}  // namespace testing
+
+#endif  // GMOCK_GMOCK_MORE_MATCHERS_H_
+
+namespace testing {
+
+// Declares Google Mock flags that we want a user to use programmatically.
+GMOCK_DECLARE_bool_(catch_leaked_mocks);
+GMOCK_DECLARE_string_(verbose);
+
+// Initializes Google Mock.  This must be called before running the
+// tests.  In particular, it parses the command line for the flags
+// that Google Mock recognizes.  Whenever a Google Mock flag is seen,
+// it is removed from argv, and *argc is decremented.
+//
+// No value is returned.  Instead, the Google Mock flag variables are
+// updated.
+//
+// Since Google Test is needed for Google Mock to work, this function
+// also initializes Google Test and parses its flags, if that hasn't
+// been done.
+GTEST_API_ void InitGoogleMock(int* argc, char** argv);
+
+// This overloaded version can be used in Windows programs compiled in
+// UNICODE mode.
+GTEST_API_ void InitGoogleMock(int* argc, wchar_t** argv);
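+
+// For illustration only, a typical test program's main():
+//
+//   int main(int argc, char** argv) {
+//     // Also initializes Google Test if that has not been done yet.
+//     ::testing::InitGoogleMock(&argc, argv);
+//     return RUN_ALL_TESTS();
+//   }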
+
+}  // namespace testing
+
+#endif  // GMOCK_INCLUDE_GMOCK_GMOCK_H_
diff --git a/internal/ceres/gmock/mock-log.h b/internal/ceres/gmock/mock-log.h
new file mode 100644
index 0000000..54669b7
--- /dev/null
+++ b/internal/ceres/gmock/mock-log.h
@@ -0,0 +1,153 @@
+// Copyright (c) 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Zhanyong Wan
+//
+// Defines the ScopedMockLog class (using Google C++ Mocking
+// Framework), which is convenient for testing code that uses LOG().
+//
+// NOTE(keir): This is a fork until Google Log exports the scoped mock log
+// class; see: http://code.google.com/p/google-glog/issues/detail?id=88
+
+#ifndef GOOGLE_CERES_INTERNAL_MOCK_LOG_H_
+#define GOOGLE_CERES_INTERNAL_MOCK_LOG_H_
+
+#include <string>
+
+#include <gmock/gmock.h>
+
+#include "glog/logging.h"
+
+namespace testing {
+
+// A ScopedMockLog object intercepts LOG() messages issued during its
+// lifespan.  Using this together with Google C++ Mocking Framework,
+// it's very easy to test how a piece of code calls LOG().  The
+// typical usage:
+//
+//   TEST(FooTest, LogsCorrectly) {
+//     ScopedMockLog log;
+//
+//     // We expect the WARNING "Something bad!" exactly twice.
+//     EXPECT_CALL(log, Log(WARNING, _, "Something bad!"))
+//         .Times(2);
+//
+//     // We allow foo.cc to call LOG(INFO) any number of times.
+//     EXPECT_CALL(log, Log(INFO, HasSubstr("/foo.cc"), _))
+//         .Times(AnyNumber());
+//
+//     Foo();  // Exercises the code under test.
+//   }
+class ScopedMockLog : public google::LogSink {
+ public:
+  // When a ScopedMockLog object is constructed, it starts to
+  // intercept logs.
+  ScopedMockLog() { AddLogSink(this); }
+
+  // When the object is destructed, it stops intercepting logs.
+  virtual ~ScopedMockLog() { RemoveLogSink(this); }
+
+  // Implements the mock method:
+  //
+  //   void Log(LogSeverity severity, const string& file_path,
+  //            const string& message);
+  //
+  // The second argument to Send() is the full path of the source file
+  // in which the LOG() was issued.
+  //
+  // Note that in a multi-threaded environment, all LOG() messages from a
+  // single thread will be handled in sequence, but that cannot be guaranteed
+  // for messages from different threads. In fact, if the same or multiple
+  // expectations are matched on two threads concurrently, their actions will
+  // be executed concurrently as well and may interleave.
+  MOCK_METHOD3(Log, void(google::LogSeverity severity,
+                         const std::string& file_path,
+                         const std::string& message));
+
+ private:
+  // Implements the send() virtual function in class LogSink.
+  // Whenever a LOG() statement is executed, this function will be
+  // invoked with information presented in the LOG().
+  //
+  // The method argument list is long and carries much information a
+  // test usually doesn't care about, so we trim the list before
+  // forwarding the call to Log(), which is much easier to use in
+  // tests.
+  //
+  // We still cannot call Log() directly, as it may invoke other LOG()
+  // messages, either due to Invoke, or due to an error logged in
+  // Google C++ Mocking Framework code, which would trigger a deadlock
+  // since a lock is held during send().
+  //
+  // Hence, we save the message for WaitTillSent() which will be called after
+  // the lock on send() is released, and we'll call Log() inside
+  // WaitTillSent(). Since only one send() call may be running at a time,
+  // while multiple WaitTillSent() calls (along with the one send() call) may
+  // be running simultaneously, we ensure thread-safety of the exchange between
+  // send() and WaitTillSent(), and that for each message, LOG(), send(),
+  // WaitTillSent() and Log() are executed in the same thread.
+  virtual void send(google::LogSeverity severity,
+                    const char* full_filename,
+                    const char* base_filename, int line, const tm* tm_time,
+                    const char* message, size_t message_len) {
+    // We are only interested in the log severity, full file name, and
+    // log message.
+    message_info_.severity = severity;
+    message_info_.file_path = full_filename;
+    message_info_.message = std::string(message, message_len);
+  }
+
+  // Implements the WaitTillSent() virtual function in class LogSink.
+  // It will be executed after send() and after the global logging lock is
+  // released, so calls within it (or rather within the Log() method called
+  // within) may also issue LOG() statements.
+  //
+  // LOG(), send(), WaitTillSent() and Log() will occur in the same thread for
+  // a given log message.
+  virtual void WaitTillSent() {
+    // First, and very importantly, we save a copy of the message being
+    // processed before calling Log(), since Log() may indirectly call send()
+    // and WaitTillSent() in the same thread again.
+    MessageInfo message_info = message_info_;
+    Log(message_info.severity, message_info.file_path, message_info.message);
+  }
+
+  // All relevant information about a logged message that needs to be passed
+  // from send() to WaitTillSent().
+  struct MessageInfo {
+    google::LogSeverity severity;
+    std::string file_path;
+    std::string message;
+  };
+  MessageInfo message_info_;
+};
+
+}  // namespace testing
+
+#endif  // GOOGLE_CERES_INTERNAL_MOCK_LOG_H_
diff --git a/internal/ceres/gmock_gtest_all.cc b/internal/ceres/gmock_gtest_all.cc
new file mode 100644
index 0000000..b3d980b
--- /dev/null
+++ b/internal/ceres/gmock_gtest_all.cc
@@ -0,0 +1,12265 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: mheule@google.com (Markus Heule)
+//
+// Google C++ Testing Framework (Google Test)
+//
+// Sometimes it's desirable to build Google Test by compiling a single file.
+// This file serves this purpose.
+
+// This line ensures that gtest.h can be compiled on its own, even
+// when it's fused.
+#include "gtest/gtest.h"
+
+// The following lines pull in the real gtest *.cc files.
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// The Google C++ Testing Framework (Google Test)
+
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// Utilities for testing Google Test itself and code that uses Google Test
+// (e.g. frameworks built on top of Google Test).
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_SPI_H_
+#define GTEST_INCLUDE_GTEST_GTEST_SPI_H_
+
+
+namespace testing {
+
+// This helper class can be used to mock out Google Test failure reporting
+// so that we can test Google Test or code that builds on Google Test.
+//
+// An object of this class appends a TestPartResult object to the
+// TestPartResultArray object given in the constructor whenever a Google Test
+// failure is reported. It can either intercept only failures that are
+// generated in the same thread that created this object or it can intercept
+// all generated failures. The scope of this mock object can be controlled with
+// the second argument to the two-argument constructor.
+class GTEST_API_ ScopedFakeTestPartResultReporter
+    : public TestPartResultReporterInterface {
+ public:
+  // The two possible mocking modes of this object.
+  enum InterceptMode {
+    INTERCEPT_ONLY_CURRENT_THREAD,  // Intercepts only thread local failures.
+    INTERCEPT_ALL_THREADS           // Intercepts all failures.
+  };
+
+  // The c'tor sets this object as the test part result reporter used
+  // by Google Test.  The 'result' parameter specifies where to report the
+  // results. This reporter will only catch failures generated in the current
+  // thread. DEPRECATED
+  explicit ScopedFakeTestPartResultReporter(TestPartResultArray* result);
+
+  // Same as above, but you can choose the interception scope of this object.
+  ScopedFakeTestPartResultReporter(InterceptMode intercept_mode,
+                                   TestPartResultArray* result);
+
+  // The d'tor restores the previous test part result reporter.
+  virtual ~ScopedFakeTestPartResultReporter();
+
+  // Appends the TestPartResult object to the TestPartResultArray
+  // received in the constructor.
+  //
+  // This method is from the TestPartResultReporterInterface
+  // interface.
+  virtual void ReportTestPartResult(const TestPartResult& result);
+ private:
+  void Init();
+
+  const InterceptMode intercept_mode_;
+  TestPartResultReporterInterface* old_reporter_;
+  TestPartResultArray* const result_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedFakeTestPartResultReporter);
+};
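+
+// For example (an illustrative sketch; Helper() stands in for arbitrary code
+// under test):
+//
+//   TestPartResultArray results;
+//   {
+//     ScopedFakeTestPartResultReporter reporter(
+//         ScopedFakeTestPartResultReporter::INTERCEPT_ONLY_CURRENT_THREAD,
+//         &results);
+//     Helper();  // Any failures it reports are captured in 'results'.
+//   }
+//   // 'results' can now be inspected instead of failing the enclosing test.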
+
+namespace internal {
+
+// A helper class for implementing EXPECT_FATAL_FAILURE() and
+// EXPECT_NONFATAL_FAILURE().  Its destructor verifies that the given
+// TestPartResultArray contains exactly one failure that has the given
+// type and contains the given substring.  If that's not the case, a
+// non-fatal failure will be generated.
+class GTEST_API_ SingleFailureChecker {
+ public:
+  // The constructor remembers the arguments.
+  SingleFailureChecker(const TestPartResultArray* results,
+                       TestPartResult::Type type,
+                       const string& substr);
+  ~SingleFailureChecker();
+ private:
+  const TestPartResultArray* const results_;
+  const TestPartResult::Type type_;
+  const string substr_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(SingleFailureChecker);
+};
+
+}  // namespace internal
+
+}  // namespace testing
+
+// A set of macros for testing Google Test assertions or code that's expected
+// to generate Google Test fatal failures.  It verifies that the given
+// statement will cause exactly one fatal Google Test failure with 'substr'
+// being part of the failure message.
+//
+// There are two different versions of this macro. EXPECT_FATAL_FAILURE only
+// affects and considers failures generated in the current thread and
+// EXPECT_FATAL_FAILURE_ON_ALL_THREADS does the same but for all threads.
+//
+// The verification of the assertion is done correctly even when the statement
+// throws an exception or aborts the current function.
+//
+// Known restrictions:
+//   - 'statement' cannot reference local non-static variables or
+//     non-static members of the current object.
+//   - 'statement' cannot return a value.
+//   - You cannot stream a failure message to this macro.
+//
+// Note that even though the implementations of the following two
+// macros are much alike, we cannot refactor them to use a common
+// helper macro, due to some peculiarity in how the preprocessor
+// works.  The AcceptsMacroThatExpandsToUnprotectedComma test in
+// gtest_unittest.cc will fail to compile if we do that.
+#define EXPECT_FATAL_FAILURE(statement, substr) \
+  do { \
+    class GTestExpectFatalFailureHelper {\
+     public:\
+      static void Execute() { statement; }\
+    };\
+    ::testing::TestPartResultArray gtest_failures;\
+    ::testing::internal::SingleFailureChecker gtest_checker(\
+        &gtest_failures, ::testing::TestPartResult::kFatalFailure, (substr));\
+    {\
+      ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
+          ::testing::ScopedFakeTestPartResultReporter:: \
+          INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures);\
+      GTestExpectFatalFailureHelper::Execute();\
+    }\
+  } while (::testing::internal::AlwaysFalse())
+
+#define EXPECT_FATAL_FAILURE_ON_ALL_THREADS(statement, substr) \
+  do { \
+    class GTestExpectFatalFailureHelper {\
+     public:\
+      static void Execute() { statement; }\
+    };\
+    ::testing::TestPartResultArray gtest_failures;\
+    ::testing::internal::SingleFailureChecker gtest_checker(\
+        &gtest_failures, ::testing::TestPartResult::kFatalFailure, (substr));\
+    {\
+      ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
+          ::testing::ScopedFakeTestPartResultReporter:: \
+          INTERCEPT_ALL_THREADS, &gtest_failures);\
+      GTestExpectFatalFailureHelper::Execute();\
+    }\
+  } while (::testing::internal::AlwaysFalse())
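+
+// For example (an illustrative sketch; Fails() and FailureTest are
+// placeholder names):
+//
+//   void Fails() { FAIL() << "Expected fatal failure."; }
+//
+//   TEST(FailureTest, CatchesFatalFailure) {
+//     EXPECT_FATAL_FAILURE(Fails(), "Expected fatal failure.");
+//   }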
+
+// A macro for testing Google Test assertions or code that's expected to
+// generate Google Test non-fatal failures.  It asserts that the given
+// statement will cause exactly one non-fatal Google Test failure with 'substr'
+// being part of the failure message.
+//
+// There are two different versions of this macro. EXPECT_NONFATAL_FAILURE only
+// affects and considers failures generated in the current thread and
+// EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS does the same but for all threads.
+//
+// 'statement' is allowed to reference local variables and members of
+// the current object.
+//
+// The verification of the assertion is done correctly even when the statement
+// throws an exception or aborts the current function.
+//
+// Known restrictions:
+//   - You cannot stream a failure message to this macro.
+//
+// Note that even though the implementations of the following two
+// macros are much alike, we cannot refactor them to use a common
+// helper macro, due to some peculiarity in how the preprocessor
+// works.  If we do that, the code won't compile when the user gives
+// EXPECT_NONFATAL_FAILURE() a statement that contains a macro that
+// expands to code containing an unprotected comma.  The
+// AcceptsMacroThatExpandsToUnprotectedComma test in gtest_unittest.cc
+// catches that.
+//
+// For the same reason, we have to write
+//   if (::testing::internal::AlwaysTrue()) { statement; }
+// instead of
+//   GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement)
+// to avoid an MSVC warning on unreachable code.
+#define EXPECT_NONFATAL_FAILURE(statement, substr) \
+  do {\
+    ::testing::TestPartResultArray gtest_failures;\
+    ::testing::internal::SingleFailureChecker gtest_checker(\
+        &gtest_failures, ::testing::TestPartResult::kNonFatalFailure, \
+        (substr));\
+    {\
+      ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
+          ::testing::ScopedFakeTestPartResultReporter:: \
+          INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures);\
+      if (::testing::internal::AlwaysTrue()) { statement; }\
+    }\
+  } while (::testing::internal::AlwaysFalse())
+
+#define EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS(statement, substr) \
+  do {\
+    ::testing::TestPartResultArray gtest_failures;\
+    ::testing::internal::SingleFailureChecker gtest_checker(\
+        &gtest_failures, ::testing::TestPartResult::kNonFatalFailure, \
+        (substr));\
+    {\
+      ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
+          ::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ALL_THREADS, \
+          &gtest_failures);\
+      if (::testing::internal::AlwaysTrue()) { statement; }\
+    }\
+  } while (::testing::internal::AlwaysFalse())
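+
+// For example (an illustrative sketch; FailureTest is a placeholder name):
+//
+//   TEST(FailureTest, CatchesNonfatalFailure) {
+//     EXPECT_NONFATAL_FAILURE(ADD_FAILURE() << "Expected non-fatal failure.",
+//                             "Expected non-fatal failure.");
+//   }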
+
+#endif  // GTEST_INCLUDE_GTEST_GTEST_SPI_H_
+
+#include <ctype.h>
+#include <math.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <wchar.h>
+#include <wctype.h>
+
+#include <algorithm>
+#include <iomanip>
+#include <limits>
+#include <list>
+#include <map>
+#include <ostream>  // NOLINT
+#include <sstream>
+#include <vector>
+
+#if GTEST_OS_LINUX
+
+// TODO(kenton@google.com): Use autoconf to detect availability of
+// gettimeofday().
+# define GTEST_HAS_GETTIMEOFDAY_ 1
+
+# include <fcntl.h>  // NOLINT
+# include <limits.h>  // NOLINT
+# include <sched.h>  // NOLINT
+// Declares vsnprintf().  This header is not available on Windows.
+# include <strings.h>  // NOLINT
+# include <sys/mman.h>  // NOLINT
+# include <sys/time.h>  // NOLINT
+# include <unistd.h>  // NOLINT
+# include <string>
+
+#elif GTEST_OS_SYMBIAN
+# define GTEST_HAS_GETTIMEOFDAY_ 1
+# include <sys/time.h>  // NOLINT
+
+#elif GTEST_OS_ZOS
+# define GTEST_HAS_GETTIMEOFDAY_ 1
+# include <sys/time.h>  // NOLINT
+
+// On z/OS we additionally need strings.h for strcasecmp.
+# include <strings.h>  // NOLINT
+
+#elif GTEST_OS_WINDOWS_MOBILE  // We are on Windows CE.
+
+# include <windows.h>  // NOLINT
+# undef min
+
+#elif GTEST_OS_WINDOWS  // We are on Windows proper.
+
+# include <io.h>  // NOLINT
+# include <sys/timeb.h>  // NOLINT
+# include <sys/types.h>  // NOLINT
+# include <sys/stat.h>  // NOLINT
+
+# if GTEST_OS_WINDOWS_MINGW
+// MinGW has gettimeofday() but not _ftime64().
+// TODO(kenton@google.com): Use autoconf to detect availability of
+//   gettimeofday().
+// TODO(kenton@google.com): There are other ways to get the time on
+//   Windows, like GetTickCount() or GetSystemTimeAsFileTime().  MinGW
+//   supports these.  Consider using them instead.
+#  define GTEST_HAS_GETTIMEOFDAY_ 1
+#  include <sys/time.h>  // NOLINT
+# endif  // GTEST_OS_WINDOWS_MINGW
+
+// cpplint thinks that the header is already included, so we want to
+// silence it.
+# include <windows.h>  // NOLINT
+# undef min
+
+#else
+
+// Assume other platforms have gettimeofday().
+// TODO(kenton@google.com): Use autoconf to detect availability of
+//   gettimeofday().
+# define GTEST_HAS_GETTIMEOFDAY_ 1
+
+// cpplint thinks that the header is already included, so we want to
+// silence it.
+# include <sys/time.h>  // NOLINT
+# include <unistd.h>  // NOLINT
+
+#endif  // GTEST_OS_LINUX
+
+#if GTEST_HAS_EXCEPTIONS
+# include <stdexcept>
+#endif
+
+#if GTEST_CAN_STREAM_RESULTS_
+# include <arpa/inet.h>  // NOLINT
+# include <netdb.h>  // NOLINT
+# include <sys/socket.h>  // NOLINT
+# include <sys/types.h>  // NOLINT
+#endif
+
+// Indicates that this translation unit is part of Google Test's
+// implementation.  It must come before gtest-internal-inl.h is
+// included, or there will be a compiler error.  This trick is to
+// prevent a user from accidentally including gtest-internal-inl.h in
+// his code.
+#define GTEST_IMPLEMENTATION_ 1
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Utility functions and classes used by the Google C++ testing framework.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// This file contains purely Google Test's internal implementation.  Please
+// DO NOT #INCLUDE IT IN A USER PROGRAM.
+
+#ifndef GTEST_SRC_GTEST_INTERNAL_INL_H_
+#define GTEST_SRC_GTEST_INTERNAL_INL_H_
+
+// GTEST_IMPLEMENTATION_ is defined to 1 iff the current translation unit is
+// part of Google Test's implementation; otherwise it's undefined.
+#if !GTEST_IMPLEMENTATION_
+// If this file is included from the user's code, just say no.
+# error "gtest-internal-inl.h is part of Google Test's internal implementation."
+# error "It must not be included except by Google Test itself."
+#endif  // GTEST_IMPLEMENTATION_
+
+#ifndef _WIN32_WCE
+# include <errno.h>
+#endif  // !_WIN32_WCE
+#include <stddef.h>
+#include <stdlib.h>  // For strtoll/_strtoul64/malloc/free.
+#include <string.h>  // For memmove.
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+
+#if GTEST_CAN_STREAM_RESULTS_
+# include <arpa/inet.h>  // NOLINT
+# include <netdb.h>  // NOLINT
+#endif
+
+#if GTEST_OS_WINDOWS
+# include <windows.h>  // NOLINT
+#endif  // GTEST_OS_WINDOWS
+
+
+namespace testing {
+
+// Declares the flags.
+//
+// We don't want the users to modify this flag in the code, but want
+// Google Test's own unit tests to be able to access it. Therefore we
+// declare it here as opposed to in gtest.h.
+GTEST_DECLARE_bool_(death_test_use_fork);
+
+namespace internal {
+
+// The value of GetTestTypeId() as seen from within the Google Test
+// library.  This is solely for testing GetTestTypeId().
+GTEST_API_ extern const TypeId kTestTypeIdInGoogleTest;
+
+// Names of the flags (needed for parsing Google Test flags).
+const char kAlsoRunDisabledTestsFlag[] = "also_run_disabled_tests";
+const char kBreakOnFailureFlag[] = "break_on_failure";
+const char kCatchExceptionsFlag[] = "catch_exceptions";
+const char kColorFlag[] = "color";
+const char kFilterFlag[] = "filter";
+const char kListTestsFlag[] = "list_tests";
+const char kOutputFlag[] = "output";
+const char kPrintTimeFlag[] = "print_time";
+const char kRandomSeedFlag[] = "random_seed";
+const char kRepeatFlag[] = "repeat";
+const char kShuffleFlag[] = "shuffle";
+const char kStackTraceDepthFlag[] = "stack_trace_depth";
+const char kStreamResultToFlag[] = "stream_result_to";
+const char kThrowOnFailureFlag[] = "throw_on_failure";
+const char kFlagfileFlag[] = "flagfile";
+
+// A valid random seed must be in [1, kMaxRandomSeed].
+const int kMaxRandomSeed = 99999;
+
+// g_help_flag is true iff the --help flag or an equivalent form is
+// specified on the command line.
+GTEST_API_ extern bool g_help_flag;
+
+// Returns the current time in milliseconds.
+GTEST_API_ TimeInMillis GetTimeInMillis();
+
+// Returns true iff Google Test should use colors in the output.
+GTEST_API_ bool ShouldUseColor(bool stdout_is_tty);
+
+// Formats the given time in milliseconds as seconds.
+GTEST_API_ std::string FormatTimeInMillisAsSeconds(TimeInMillis ms);
+
+// Converts the given time in milliseconds to a date string in the ISO 8601
+// format, without the timezone information.  N.B.: due to the use of the
+// non-reentrant localtime() function, this function is not thread safe.  Do
+// not use it in any code that can be called from multiple threads.
+GTEST_API_ std::string FormatEpochTimeInMillisAsIso8601(TimeInMillis ms);
+
+// Parses a string for an Int32 flag, in the form of "--flag=value".
+//
+// On success, stores the value of the flag in *value, and returns
+// true.  On failure, returns false without changing *value.
+GTEST_API_ bool ParseInt32Flag(
+    const char* str, const char* flag, Int32* value);
+
+// Returns a random seed in range [1, kMaxRandomSeed] based on the
+// given --gtest_random_seed flag value.
+inline int GetRandomSeedFromFlag(Int32 random_seed_flag) {
+  const unsigned int raw_seed = (random_seed_flag == 0) ?
+      static_cast<unsigned int>(GetTimeInMillis()) :
+      static_cast<unsigned int>(random_seed_flag);
+
+  // Normalizes the actual seed to range [1, kMaxRandomSeed] such that
+  // it's easy to type.
+  const int normalized_seed =
+      static_cast<int>((raw_seed - 1U) %
+                       static_cast<unsigned int>(kMaxRandomSeed)) + 1;
+  return normalized_seed;
+}
+
+// Returns the first valid random seed after 'seed'.  The behavior is
+// undefined if 'seed' is invalid.  The seed after kMaxRandomSeed is
+// considered to be 1.
+inline int GetNextRandomSeed(int seed) {
+  GTEST_CHECK_(1 <= seed && seed <= kMaxRandomSeed)
+      << "Invalid random seed " << seed << " - must be in [1, "
+      << kMaxRandomSeed << "].";
+  const int next_seed = seed + 1;
+  return (next_seed > kMaxRandomSeed) ? 1 : next_seed;
+}
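+
+// For example, GetNextRandomSeed(kMaxRandomSeed) returns 1, and
+// GetRandomSeedFromFlag(0) derives the seed from the current time.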
+
+// This class saves the values of all Google Test flags in its c'tor, and
+// restores them in its d'tor.
+class GTestFlagSaver {
+ public:
+  // The c'tor.
+  GTestFlagSaver() {
+    also_run_disabled_tests_ = GTEST_FLAG(also_run_disabled_tests);
+    break_on_failure_ = GTEST_FLAG(break_on_failure);
+    catch_exceptions_ = GTEST_FLAG(catch_exceptions);
+    color_ = GTEST_FLAG(color);
+    death_test_style_ = GTEST_FLAG(death_test_style);
+    death_test_use_fork_ = GTEST_FLAG(death_test_use_fork);
+    filter_ = GTEST_FLAG(filter);
+    internal_run_death_test_ = GTEST_FLAG(internal_run_death_test);
+    list_tests_ = GTEST_FLAG(list_tests);
+    output_ = GTEST_FLAG(output);
+    print_time_ = GTEST_FLAG(print_time);
+    random_seed_ = GTEST_FLAG(random_seed);
+    repeat_ = GTEST_FLAG(repeat);
+    shuffle_ = GTEST_FLAG(shuffle);
+    stack_trace_depth_ = GTEST_FLAG(stack_trace_depth);
+    stream_result_to_ = GTEST_FLAG(stream_result_to);
+    throw_on_failure_ = GTEST_FLAG(throw_on_failure);
+  }
+
+  // The d'tor is not virtual.  DO NOT INHERIT FROM THIS CLASS.
+  ~GTestFlagSaver() {
+    GTEST_FLAG(also_run_disabled_tests) = also_run_disabled_tests_;
+    GTEST_FLAG(break_on_failure) = break_on_failure_;
+    GTEST_FLAG(catch_exceptions) = catch_exceptions_;
+    GTEST_FLAG(color) = color_;
+    GTEST_FLAG(death_test_style) = death_test_style_;
+    GTEST_FLAG(death_test_use_fork) = death_test_use_fork_;
+    GTEST_FLAG(filter) = filter_;
+    GTEST_FLAG(internal_run_death_test) = internal_run_death_test_;
+    GTEST_FLAG(list_tests) = list_tests_;
+    GTEST_FLAG(output) = output_;
+    GTEST_FLAG(print_time) = print_time_;
+    GTEST_FLAG(random_seed) = random_seed_;
+    GTEST_FLAG(repeat) = repeat_;
+    GTEST_FLAG(shuffle) = shuffle_;
+    GTEST_FLAG(stack_trace_depth) = stack_trace_depth_;
+    GTEST_FLAG(stream_result_to) = stream_result_to_;
+    GTEST_FLAG(throw_on_failure) = throw_on_failure_;
+  }
+
+ private:
+  // Fields for saving the original values of flags.
+  bool also_run_disabled_tests_;
+  bool break_on_failure_;
+  bool catch_exceptions_;
+  std::string color_;
+  std::string death_test_style_;
+  bool death_test_use_fork_;
+  std::string filter_;
+  std::string internal_run_death_test_;
+  bool list_tests_;
+  std::string output_;
+  bool print_time_;
+  internal::Int32 random_seed_;
+  internal::Int32 repeat_;
+  bool shuffle_;
+  internal::Int32 stack_trace_depth_;
+  std::string stream_result_to_;
+  bool throw_on_failure_;
+} GTEST_ATTRIBUTE_UNUSED_;
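+
+// For example (an illustrative sketch): code that temporarily overrides a
+// flag can rely on the saver to restore it:
+//
+//   {
+//     GTestFlagSaver saver;      // Saves all Google Test flag values.
+//     GTEST_FLAG(repeat) = 10;   // Temporarily changes a flag.
+//     ...                        // Code that relies on the modified flag.
+//   }                            // 'saver' restores the original values here.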
+
+// Converts a Unicode code point to a narrow string in UTF-8 encoding.
+// code_point parameter is of type UInt32 because wchar_t may not be
+// wide enough to contain a code point.
+// If the code_point is not a valid Unicode code point
+// (i.e. outside of Unicode range U+0 to U+10FFFF) it will be converted
+// to "(Invalid Unicode 0xXXXXXXXX)".
+GTEST_API_ std::string CodePointToUtf8(UInt32 code_point);
+
+// Converts a wide string to a narrow string in UTF-8 encoding.
+// The wide string is assumed to have the following encoding:
+//   UTF-16 if sizeof(wchar_t) == 2 (on Windows, Cygwin, Symbian OS)
+//   UTF-32 if sizeof(wchar_t) == 4 (on Linux)
+// Parameter str points to a null-terminated wide string.
+// Parameter num_chars may additionally limit the number
+// of wchar_t characters processed. -1 is used when the entire string
+// should be processed.
+// If the string contains code points that are not valid Unicode code points
+// (i.e. outside of Unicode range U+0 to U+10FFFF) they will be output
+// as '(Invalid Unicode 0xXXXXXXXX)'. If the string is in UTF-16 encoding
+// and contains invalid UTF-16 surrogate pairs, values in those pairs
+// will be encoded as individual Unicode characters from the Basic
+// Multilingual Plane.
+GTEST_API_ std::string WideStringToUtf8(const wchar_t* str, int num_chars);
+
+// Reads the GTEST_SHARD_STATUS_FILE environment variable, and creates the file
+// if the variable is present. If a file already exists at this location, this
+// function will write over it. If the variable is present, but the file cannot
+// be created, prints an error and exits.
+void WriteToShardStatusFileIfNeeded();
+
+// Checks whether sharding is enabled by examining the relevant
+// environment variable values. If the variables are present,
+// but inconsistent (e.g., shard_index >= total_shards), prints
+// an error and exits. If in_subprocess_for_death_test, sharding is
+// disabled because it must only be applied to the original test
+// process. Otherwise, we could filter out death tests we intended to execute.
+GTEST_API_ bool ShouldShard(const char* total_shards_str,
+                            const char* shard_index_str,
+                            bool in_subprocess_for_death_test);
+
+// Parses the environment variable var as an Int32. If it is unset,
+// returns default_val. If it is not an Int32, prints an error and aborts.
+GTEST_API_ Int32 Int32FromEnvOrDie(const char* env_var, Int32 default_val);
+
+// Given the total number of shards, the shard index, and the test id,
+// returns true iff the test should be run on this shard. The test id is
+// some arbitrary but unique non-negative integer assigned to each test
+// method. Assumes that 0 <= shard_index < total_shards.
+GTEST_API_ bool ShouldRunTestOnShard(
+    int total_shards, int shard_index, int test_id);
+
+// STL container utilities.
+
+// Returns the number of elements in the given container that satisfy
+// the given predicate.
+template <class Container, typename Predicate>
+inline int CountIf(const Container& c, Predicate predicate) {
+  // Implemented as an explicit loop since std::count_if() in libCstd on
+  // Solaris has a non-standard signature.
+  int count = 0;
+  for (typename Container::const_iterator it = c.begin(); it != c.end(); ++it) {
+    if (predicate(*it))
+      ++count;
+  }
+  return count;
+}
+
+// Applies a function/functor to each element in the container.
+template <class Container, typename Functor>
+void ForEach(const Container& c, Functor functor) {
+  std::for_each(c.begin(), c.end(), functor);
+}
+
+// Returns the i-th element of the vector, or default_value if i is not
+// in range [0, v.size()).
+template <typename E>
+inline E GetElementOr(const std::vector<E>& v, int i, E default_value) {
+  return (i < 0 || i >= static_cast<int>(v.size())) ? default_value : v[i];
+}
+
+// Performs an in-place shuffle of a range of the vector's elements.
+// 'begin' and 'end' are element indices as an STL-style range;
+// i.e. [begin, end) are shuffled, where 'end' == size() means to
+// shuffle to the end of the vector.
+template <typename E>
+void ShuffleRange(internal::Random* random, int begin, int end,
+                  std::vector<E>* v) {
+  const int size = static_cast<int>(v->size());
+  GTEST_CHECK_(0 <= begin && begin <= size)
+      << "Invalid shuffle range start " << begin << ": must be in range [0, "
+      << size << "].";
+  GTEST_CHECK_(begin <= end && end <= size)
+      << "Invalid shuffle range finish " << end << ": must be in range ["
+      << begin << ", " << size << "].";
+
+  // Fisher-Yates shuffle, from
+  // http://en.wikipedia.org/wiki/Fisher-Yates_shuffle
+  for (int range_width = end - begin; range_width >= 2; range_width--) {
+    const int last_in_range = begin + range_width - 1;
+    const int selected = begin + random->Generate(range_width);
+    std::swap((*v)[selected], (*v)[last_in_range]);
+  }
+}
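+
+// For example, ShuffleRange(rng, 2, 5, &v) on a 6-element vector permutes
+// only v[2], v[3], and v[4], leaving the remaining elements in place.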
+
+// Performs an in-place shuffle of the vector's elements.
+template <typename E>
+inline void Shuffle(internal::Random* random, std::vector<E>* v) {
+  ShuffleRange(random, 0, static_cast<int>(v->size()), v);
+}
+
+// A function for deleting an object.  Handy for being used as a
+// functor.
+template <typename T>
+static void Delete(T* x) {
+  delete x;
+}
+
+// A predicate that checks the key of a TestProperty against a known key.
+//
+// TestPropertyKeyIs is copyable.
+class TestPropertyKeyIs {
+ public:
+  // Constructor.
+  //
+  // TestPropertyKeyIs has NO default constructor.
+  explicit TestPropertyKeyIs(const std::string& key) : key_(key) {}
+
+  // Returns true iff the key of the test property matches key_.
+  bool operator()(const TestProperty& test_property) const {
+    return test_property.key() == key_;
+  }
+
+ private:
+  std::string key_;
+};
+
+// Class UnitTestOptions.
+//
+// This class contains functions for processing options the user
+// specifies when running the tests.  It has only static members.
+//
+// In most cases, the user can specify an option using either an
+// environment variable or a command line flag.  E.g. you can set the
+// test filter using either GTEST_FILTER or --gtest_filter.  If both
+// the variable and the flag are present, the latter overrides the
+// former.
+class GTEST_API_ UnitTestOptions {
+ public:
+  // Functions for processing the gtest_output flag.
+
+  // Returns the output format, or "" for normal printed output.
+  static std::string GetOutputFormat();
+
+  // Returns the absolute path of the requested output file, or the
+  // default (test_detail.xml in the original working directory) if
+  // none was explicitly specified.
+  static std::string GetAbsolutePathToOutputFile();
+
+  // Functions for processing the gtest_filter flag.
+
+  // Returns true iff the wildcard pattern matches the string.  The
+  // first ':' or '\0' character in pattern marks the end of it.
+  //
+  // This recursive algorithm isn't very efficient, but is clear and
+  // works well enough for matching test names, which are short.
+  static bool PatternMatchesString(const char *pattern, const char *str);
+
+  // Returns true iff the user-specified filter matches the test case
+  // name and the test name.
+  static bool FilterMatchesTest(const std::string &test_case_name,
+                                const std::string &test_name);
+
+#if GTEST_OS_WINDOWS
+  // Function for supporting the gtest_catch_exception flag.
+
+  // Returns EXCEPTION_EXECUTE_HANDLER if Google Test should handle the
+  // given SEH exception, or EXCEPTION_CONTINUE_SEARCH otherwise.
+  // This function is useful as an __except condition.
+  static int GTestShouldProcessSEH(DWORD exception_code);
+#endif  // GTEST_OS_WINDOWS
+
+  // Returns true if "name" matches the ':' separated list of glob-style
+  // filters in "filter".
+  static bool MatchesFilter(const std::string& name, const char* filter);
+};
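+
+// For example, invoking the test binary as
+//
+//   GTEST_FILTER='FooTest.*' ./my_test --gtest_filter=FooTest.Bar
+//
+// runs only FooTest.Bar, since the --gtest_filter flag overrides the
+// GTEST_FILTER environment variable ('my_test', 'FooTest', and 'Bar' are
+// placeholder names).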
+
+// Returns the current application's name, removing directory path if that
+// is present.  Used by UnitTestOptions::GetOutputFile.
+GTEST_API_ FilePath GetCurrentExecutableName();
+
+// The role interface for getting the OS stack trace as a string.
+class OsStackTraceGetterInterface {
+ public:
+  OsStackTraceGetterInterface() {}
+  virtual ~OsStackTraceGetterInterface() {}
+
+  // Returns the current OS stack trace as an std::string.  Parameters:
+  //
+  //   max_depth  - the maximum number of stack frames to be included
+  //                in the trace.
+  //   skip_count - the number of top frames to be skipped; doesn't count
+  //                against max_depth.
+  virtual string CurrentStackTrace(int max_depth, int skip_count) = 0;
+
+  // UponLeavingGTest() should be called immediately before Google Test calls
+  // user code. It saves some information about the current stack that
+  // CurrentStackTrace() will use to find and hide Google Test stack frames.
+  virtual void UponLeavingGTest() = 0;
+
+  // This string is inserted in place of stack frames that are part of
+  // Google Test's implementation.
+  static const char* const kElidedFramesMarker;
+
+ private:
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(OsStackTraceGetterInterface);
+};
+
+// A working implementation of the OsStackTraceGetterInterface interface.
+class OsStackTraceGetter : public OsStackTraceGetterInterface {
+ public:
+  OsStackTraceGetter() {}
+
+  virtual string CurrentStackTrace(int max_depth, int skip_count);
+  virtual void UponLeavingGTest();
+
+ private:
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(OsStackTraceGetter);
+};
+
+// Information about a Google Test trace point.
+struct TraceInfo {
+  const char* file;
+  int line;
+  std::string message;
+};
+
+// This is the default global test part result reporter used in UnitTestImpl.
+// This class should only be used by UnitTestImpl.
+class DefaultGlobalTestPartResultReporter
+  : public TestPartResultReporterInterface {
+ public:
+  explicit DefaultGlobalTestPartResultReporter(UnitTestImpl* unit_test);
+  // Implements the TestPartResultReporterInterface. Reports the test part
+  // result in the current test.
+  virtual void ReportTestPartResult(const TestPartResult& result);
+
+ private:
+  UnitTestImpl* const unit_test_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultGlobalTestPartResultReporter);
+};
+
+// This is the default per thread test part result reporter used in
+// UnitTestImpl. This class should only be used by UnitTestImpl.
+class DefaultPerThreadTestPartResultReporter
+    : public TestPartResultReporterInterface {
+ public:
+  explicit DefaultPerThreadTestPartResultReporter(UnitTestImpl* unit_test);
+  // Implements the TestPartResultReporterInterface. The implementation just
+  // delegates to the current global test part result reporter of *unit_test_.
+  virtual void ReportTestPartResult(const TestPartResult& result);
+
+ private:
+  UnitTestImpl* const unit_test_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultPerThreadTestPartResultReporter);
+};
+
+// The private implementation of the UnitTest class.  We don't protect
+// the methods under a mutex, as this class is not accessible by a
+// user and the UnitTest class that delegates work to this class does
+// proper locking.
+class GTEST_API_ UnitTestImpl {
+ public:
+  explicit UnitTestImpl(UnitTest* parent);
+  virtual ~UnitTestImpl();
+
+  // There are two different ways to register your own TestPartResultReporter.
+  // You can register your own reporter to listen either only for test results
+  // from the current thread or for results from all threads.
+  // By default, each per-thread test result reporter just passes a new
+  // TestPartResult to the global test result reporter, which registers the
+  // test part result for the currently running test.
+
+  // Returns the global test part result reporter.
+  TestPartResultReporterInterface* GetGlobalTestPartResultReporter();
+
+  // Sets the global test part result reporter.
+  void SetGlobalTestPartResultReporter(
+      TestPartResultReporterInterface* reporter);
+
+  // Returns the test part result reporter for the current thread.
+  TestPartResultReporterInterface* GetTestPartResultReporterForCurrentThread();
+
+  // Sets the test part result reporter for the current thread.
+  void SetTestPartResultReporterForCurrentThread(
+      TestPartResultReporterInterface* reporter);
+
+  // Gets the number of successful test cases.
+  int successful_test_case_count() const;
+
+  // Gets the number of failed test cases.
+  int failed_test_case_count() const;
+
+  // Gets the number of all test cases.
+  int total_test_case_count() const;
+
+  // Gets the number of all test cases that contain at least one test
+  // that should run.
+  int test_case_to_run_count() const;
+
+  // Gets the number of successful tests.
+  int successful_test_count() const;
+
+  // Gets the number of failed tests.
+  int failed_test_count() const;
+
+  // Gets the number of disabled tests that will be reported in the XML report.
+  int reportable_disabled_test_count() const;
+
+  // Gets the number of disabled tests.
+  int disabled_test_count() const;
+
+  // Gets the number of tests to be printed in the XML report.
+  int reportable_test_count() const;
+
+  // Gets the number of all tests.
+  int total_test_count() const;
+
+  // Gets the number of tests that should run.
+  int test_to_run_count() const;
+
+  // Gets the time of the test program start, in ms from the start of the
+  // UNIX epoch.
+  TimeInMillis start_timestamp() const { return start_timestamp_; }
+
+  // Gets the elapsed time, in milliseconds.
+  TimeInMillis elapsed_time() const { return elapsed_time_; }
+
+  // Returns true iff the unit test passed (i.e. all test cases passed).
+  bool Passed() const { return !Failed(); }
+
+  // Returns true iff the unit test failed (i.e. some test case failed
+  // or something outside of all tests failed).
+  bool Failed() const {
+    return failed_test_case_count() > 0 || ad_hoc_test_result()->Failed();
+  }
+
+  // Gets the i-th test case among all the test cases. i can range from 0 to
+  // total_test_case_count() - 1. If i is not in that range, returns NULL.
+  const TestCase* GetTestCase(int i) const {
+    const int index = GetElementOr(test_case_indices_, i, -1);
+    return index < 0 ? NULL : test_cases_[index];
+  }
+
+  // Gets the i-th test case among all the test cases. i can range from 0 to
+  // total_test_case_count() - 1. If i is not in that range, returns NULL.
+  TestCase* GetMutableTestCase(int i) {
+    const int index = GetElementOr(test_case_indices_, i, -1);
+    return index < 0 ? NULL : test_cases_[index];
+  }
+
+  // Provides access to the event listener list.
+  TestEventListeners* listeners() { return &listeners_; }
+
+  // Returns the TestResult for the test that's currently running, or
+  // the TestResult for the ad hoc test if no test is running.
+  TestResult* current_test_result();
+
+  // Returns the TestResult for the ad hoc test.
+  const TestResult* ad_hoc_test_result() const { return &ad_hoc_test_result_; }
+
+  // Sets the OS stack trace getter.
+  //
+  // Does nothing if the input and the current OS stack trace getter
+  // are the same; otherwise, deletes the old getter and makes the
+  // input the current getter.
+  void set_os_stack_trace_getter(OsStackTraceGetterInterface* getter);
+
+  // Returns the current OS stack trace getter if it is not NULL;
+  // otherwise, creates an OsStackTraceGetter, makes it the current
+  // getter, and returns it.
+  OsStackTraceGetterInterface* os_stack_trace_getter();
+
+  // Returns the current OS stack trace as an std::string.
+  //
+  // The maximum number of stack frames to be included is specified by
+  // the gtest_stack_trace_depth flag.  The skip_count parameter
+  // specifies the number of top frames to be skipped, which doesn't
+  // count against the number of frames to be included.
+  //
+  // For example, if Foo() calls Bar(), which in turn calls
+  // CurrentOsStackTraceExceptTop(1), Foo() will be included in the
+  // trace but Bar() and CurrentOsStackTraceExceptTop() won't.
+  std::string CurrentOsStackTraceExceptTop(int skip_count) GTEST_NO_INLINE_;
+
+  // Finds and returns a TestCase with the given name.  If one doesn't
+  // exist, creates one and returns it.
+  //
+  // Arguments:
+  //
+  //   test_case_name: name of the test case
+  //   type_param:     the name of the test's type parameter, or NULL if
+  //                   this is not a typed or a type-parameterized test.
+  //   set_up_tc:      pointer to the function that sets up the test case
+  //   tear_down_tc:   pointer to the function that tears down the test case
+  TestCase* GetTestCase(const char* test_case_name,
+                        const char* type_param,
+                        Test::SetUpTestCaseFunc set_up_tc,
+                        Test::TearDownTestCaseFunc tear_down_tc);
+
+  // Adds a TestInfo to the unit test.
+  //
+  // Arguments:
+  //
+  //   set_up_tc:    pointer to the function that sets up the test case
+  //   tear_down_tc: pointer to the function that tears down the test case
+  //   test_info:    the TestInfo object
+  void AddTestInfo(Test::SetUpTestCaseFunc set_up_tc,
+                   Test::TearDownTestCaseFunc tear_down_tc,
+                   TestInfo* test_info) {
+    // In order to support thread-safe death tests, we need to
+    // remember the original working directory when the test program
+    // was first invoked.  We cannot do this in RUN_ALL_TESTS(), as
+    // the user may have changed the current directory before calling
+    // RUN_ALL_TESTS().  Therefore we capture the current directory in
+    // AddTestInfo(), which is called to register a TEST or TEST_F
+    // before main() is reached.
+    if (original_working_dir_.IsEmpty()) {
+      original_working_dir_.Set(FilePath::GetCurrentDir());
+      GTEST_CHECK_(!original_working_dir_.IsEmpty())
+          << "Failed to get the current working directory.";
+    }
+
+    GetTestCase(test_info->test_case_name(),
+                test_info->type_param(),
+                set_up_tc,
+                tear_down_tc)->AddTestInfo(test_info);
+  }
+
+#if GTEST_HAS_PARAM_TEST
+  // Returns ParameterizedTestCaseRegistry object used to keep track of
+  // value-parameterized tests and instantiate and register them.
+  internal::ParameterizedTestCaseRegistry& parameterized_test_registry() {
+    return parameterized_test_registry_;
+  }
+#endif  // GTEST_HAS_PARAM_TEST
+
+  // Sets the TestCase object for the test that's currently running.
+  void set_current_test_case(TestCase* a_current_test_case) {
+    current_test_case_ = a_current_test_case;
+  }
+
+  // Sets the TestInfo object for the test that's currently running.  If
+  // current_test_info is NULL, the assertion results will be stored in
+  // ad_hoc_test_result_.
+  void set_current_test_info(TestInfo* a_current_test_info) {
+    current_test_info_ = a_current_test_info;
+  }
+
+  // Registers all parameterized tests defined using TEST_P and
+  // INSTANTIATE_TEST_CASE_P, creating regular tests for each test/parameter
+  // combination. This method can be called more than once; it has guards
+  // protecting from registering the tests more than once.  If
+  // value-parameterized tests are disabled, RegisterParameterizedTests is
+  // present but does nothing.
+  void RegisterParameterizedTests();
+
+  // Runs all tests in this UnitTest object, prints the result, and
+  // returns true if all tests are successful.  If any exception is
+  // thrown during a test, this test is considered to be failed, but
+  // the rest of the tests will still be run.
+  bool RunAllTests();
+
+  // Clears the results of all tests, except the ad hoc tests.
+  void ClearNonAdHocTestResult() {
+    ForEach(test_cases_, TestCase::ClearTestCaseResult);
+  }
+
+  // Clears the results of ad-hoc test assertions.
+  void ClearAdHocTestResult() {
+    ad_hoc_test_result_.Clear();
+  }
+
+  // Adds a TestProperty to the current TestResult object when invoked in a
+  // context of a test or a test case, or to the global property set. If the
+  // result already contains a property with the same key, the value will be
+  // updated.
+  void RecordProperty(const TestProperty& test_property);
+
+  enum ReactionToSharding {
+    HONOR_SHARDING_PROTOCOL,
+    IGNORE_SHARDING_PROTOCOL
+  };
+
+  // Matches the full name of each test against the user-specified
+  // filter to decide whether the test should run, then records the
+  // result in each TestCase and TestInfo object.
+  // If shard_tests == HONOR_SHARDING_PROTOCOL, further filters tests
+  // based on sharding variables in the environment.
+  // Returns the number of tests that should run.
+  int FilterTests(ReactionToSharding shard_tests);
+
+  // Prints the names of the tests matching the user-specified filter flag.
+  void ListTestsMatchingFilter();
+
+  const TestCase* current_test_case() const { return current_test_case_; }
+  TestInfo* current_test_info() { return current_test_info_; }
+  const TestInfo* current_test_info() const { return current_test_info_; }
+
+  // Returns the vector of environments that need to be set-up/torn-down
+  // before/after the tests are run.
+  std::vector<Environment*>& environments() { return environments_; }
+
+  // Getters for the per-thread Google Test trace stack.
+  std::vector<TraceInfo>& gtest_trace_stack() {
+    return *(gtest_trace_stack_.pointer());
+  }
+  const std::vector<TraceInfo>& gtest_trace_stack() const {
+    return gtest_trace_stack_.get();
+  }
+
+#if GTEST_HAS_DEATH_TEST
+  void InitDeathTestSubprocessControlInfo() {
+    internal_run_death_test_flag_.reset(ParseInternalRunDeathTestFlag());
+  }
+  // Returns a pointer to the parsed --gtest_internal_run_death_test
+  // flag, or NULL if that flag was not specified.
+  // This information is useful only in a death test child process.
+  // Must not be called before a call to InitGoogleTest.
+  const InternalRunDeathTestFlag* internal_run_death_test_flag() const {
+    return internal_run_death_test_flag_.get();
+  }
+
+  // Returns a pointer to the current death test factory.
+  internal::DeathTestFactory* death_test_factory() {
+    return death_test_factory_.get();
+  }
+
+  void SuppressTestEventsIfInSubprocess();
+
+  friend class ReplaceDeathTestFactory;
+#endif  // GTEST_HAS_DEATH_TEST
+
+  // Initializes the event listener performing XML output as specified by
+  // UnitTestOptions. Must not be called before InitGoogleTest.
+  void ConfigureXmlOutput();
+
+#if GTEST_CAN_STREAM_RESULTS_
+  // Initializes the event listener for streaming test results to a socket.
+  // Must not be called before InitGoogleTest.
+  void ConfigureStreamingOutput();
+#endif
+
+  // Performs initialization dependent upon flag values obtained in
+  // ParseGoogleTestFlagsOnly.  Is called from InitGoogleTest after the call to
+  // ParseGoogleTestFlagsOnly.  In case a user neglects to call InitGoogleTest,
+  // this function is also called from RunAllTests.  Since this function can be
+  // called more than once, it has to be idempotent.
+  void PostFlagParsingInit();
+
+  // Gets the random seed used at the start of the current test iteration.
+  int random_seed() const { return random_seed_; }
+
+  // Gets the random number generator.
+  internal::Random* random() { return &random_; }
+
+  // Shuffles all test cases, and the tests within each test case,
+  // making sure that death tests are still run first.
+  void ShuffleTests();
+
+  // Restores the test cases and tests to their order before the first shuffle.
+  void UnshuffleTests();
+
+  // Returns the value of GTEST_FLAG(catch_exceptions) at the moment
+  // UnitTest::Run() starts.
+  bool catch_exceptions() const { return catch_exceptions_; }
+
+ private:
+  friend class ::testing::UnitTest;
+
+  // Used by UnitTest::Run() to capture the state of
+  // GTEST_FLAG(catch_exceptions) at the moment it starts.
+  void set_catch_exceptions(bool value) { catch_exceptions_ = value; }
+
+  // The UnitTest object that owns this implementation object.
+  UnitTest* const parent_;
+
+  // The working directory when the first TEST() or TEST_F() was
+  // executed.
+  internal::FilePath original_working_dir_;
+
+  // The default test part result reporters.
+  DefaultGlobalTestPartResultReporter default_global_test_part_result_reporter_;
+  DefaultPerThreadTestPartResultReporter
+      default_per_thread_test_part_result_reporter_;
+
+  // Points to (but doesn't own) the global test part result reporter.
+  TestPartResultReporterInterface* global_test_part_result_repoter_;
+
+  // Protects read and write access to global_test_part_result_reporter_.
+  internal::Mutex global_test_part_result_reporter_mutex_;
+
+  // Points to (but doesn't own) the per-thread test part result reporter.
+  internal::ThreadLocal<TestPartResultReporterInterface*>
+      per_thread_test_part_result_reporter_;
+
+  // The vector of environments that need to be set-up/torn-down
+  // before/after the tests are run.
+  std::vector<Environment*> environments_;
+
+  // The vector of TestCases in their original order.  It owns the
+  // elements in the vector.
+  std::vector<TestCase*> test_cases_;
+
+  // Provides a level of indirection for the test case list to allow
+  // easy shuffling and restoring the test case order.  The i-th
+  // element of this vector is the index of the i-th test case in the
+  // shuffled order.
+  std::vector<int> test_case_indices_;
+
+#if GTEST_HAS_PARAM_TEST
+  // ParameterizedTestRegistry object used to register value-parameterized
+  // tests.
+  internal::ParameterizedTestCaseRegistry parameterized_test_registry_;
+
+  // Indicates whether RegisterParameterizedTests() has been called already.
+  bool parameterized_tests_registered_;
+#endif  // GTEST_HAS_PARAM_TEST
+
+  // Index of the last death test case registered.  Initially -1.
+  int last_death_test_case_;
+
+  // This points to the TestCase for the currently running test.  It
+  // changes as Google Test goes through one test case after another.
+  // When no test is running, this is set to NULL and Google Test
+  // stores assertion results in ad_hoc_test_result_.  Initially NULL.
+  TestCase* current_test_case_;
+
+  // This points to the TestInfo for the currently running test.  It
+  // changes as Google Test goes through one test after another.  When
+  // no test is running, this is set to NULL and Google Test stores
+  // assertion results in ad_hoc_test_result_.  Initially NULL.
+  TestInfo* current_test_info_;
+
+  // Normally, a user only writes assertions inside a TEST or TEST_F,
+  // or inside a function called by a TEST or TEST_F.  Since Google
+  // Test keeps track of which test is currently running, it can
+  // associate such an assertion with the test it belongs to.
+  //
+  // If an assertion is encountered when no TEST or TEST_F is running,
+  // Google Test attributes the assertion result to an imaginary "ad hoc"
+  // test, and records the result in ad_hoc_test_result_.
+  TestResult ad_hoc_test_result_;
+
+  // The list of event listeners that can be used to track events inside
+  // Google Test.
+  TestEventListeners listeners_;
+
+  // The OS stack trace getter.  Will be deleted when the UnitTest
+  // object is destructed.  By default, an OsStackTraceGetter is used,
+  // but the user can set this field to use a custom getter if that is
+  // desired.
+  OsStackTraceGetterInterface* os_stack_trace_getter_;
+
+  // True iff PostFlagParsingInit() has been called.
+  bool post_flag_parse_init_performed_;
+
+  // The random number seed used at the beginning of the test run.
+  int random_seed_;
+
+  // Our random number generator.
+  internal::Random random_;
+
+  // The time of the test program start, in ms from the start of the
+  // UNIX epoch.
+  TimeInMillis start_timestamp_;
+
+  // How long the test took to run, in milliseconds.
+  TimeInMillis elapsed_time_;
+
+#if GTEST_HAS_DEATH_TEST
+  // The decomposed components of the gtest_internal_run_death_test flag,
+  // parsed when RUN_ALL_TESTS is called.
+  internal::scoped_ptr<InternalRunDeathTestFlag> internal_run_death_test_flag_;
+  internal::scoped_ptr<internal::DeathTestFactory> death_test_factory_;
+#endif  // GTEST_HAS_DEATH_TEST
+
+  // A per-thread stack of traces created by the SCOPED_TRACE() macro.
+  internal::ThreadLocal<std::vector<TraceInfo> > gtest_trace_stack_;
+
+  // The value of GTEST_FLAG(catch_exceptions) at the moment RunAllTests()
+  // starts.
+  bool catch_exceptions_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(UnitTestImpl);
+};  // class UnitTestImpl
+
+// Convenience function for accessing the global UnitTest
+// implementation object.
+inline UnitTestImpl* GetUnitTestImpl() {
+  return UnitTest::GetInstance()->impl();
+}
+
+#if GTEST_USES_SIMPLE_RE
+
+// Internal helper functions for implementing the simple regular
+// expression matcher.
+GTEST_API_ bool IsInSet(char ch, const char* str);
+GTEST_API_ bool IsAsciiDigit(char ch);
+GTEST_API_ bool IsAsciiPunct(char ch);
+GTEST_API_ bool IsRepeat(char ch);
+GTEST_API_ bool IsAsciiWhiteSpace(char ch);
+GTEST_API_ bool IsAsciiWordChar(char ch);
+GTEST_API_ bool IsValidEscape(char ch);
+GTEST_API_ bool AtomMatchesChar(bool escaped, char pattern, char ch);
+GTEST_API_ bool ValidateRegex(const char* regex);
+GTEST_API_ bool MatchRegexAtHead(const char* regex, const char* str);
+GTEST_API_ bool MatchRepetitionAndRegexAtHead(
+    bool escaped, char ch, char repeat, const char* regex, const char* str);
+GTEST_API_ bool MatchRegexAnywhere(const char* regex, const char* str);
+
+#endif  // GTEST_USES_SIMPLE_RE
+
+// Parses the command line for Google Test flags, without initializing
+// other parts of Google Test.
+GTEST_API_ void ParseGoogleTestFlagsOnly(int* argc, char** argv);
+GTEST_API_ void ParseGoogleTestFlagsOnly(int* argc, wchar_t** argv);
+
+#if GTEST_HAS_DEATH_TEST
+
+// Returns the message describing the last system error, regardless of the
+// platform.
+GTEST_API_ std::string GetLastErrnoDescription();
+
+// Attempts to parse a string into a positive integer pointed to by the
+// number parameter.  Returns true if that is possible.
+// GTEST_HAS_DEATH_TEST implies that we have ::std::string, so we can use
+// it here.
+template <typename Integer>
+bool ParseNaturalNumber(const ::std::string& str, Integer* number) {
+  // Fail fast if the given string does not begin with a digit;
+  // this bypasses strtoXXX's "optional leading whitespace and plus
+  // or minus sign" semantics, which are undesirable here.
+  if (str.empty() || !IsDigit(str[0])) {
+    return false;
+  }
+  errno = 0;
+
+  char* end;
+  // BiggestConvertible is the largest integer type that system-provided
+  // string-to-number conversion routines can return.
+
+# if GTEST_OS_WINDOWS && !defined(__GNUC__)
+
+  // MSVC and C++ Builder define __int64 instead of the standard long long.
+  typedef unsigned __int64 BiggestConvertible;
+  const BiggestConvertible parsed = _strtoui64(str.c_str(), &end, 10);
+
+# else
+
+  typedef unsigned long long BiggestConvertible;  // NOLINT
+  const BiggestConvertible parsed = strtoull(str.c_str(), &end, 10);
+
+# endif  // GTEST_OS_WINDOWS && !defined(__GNUC__)
+
+  const bool parse_success = *end == '\0' && errno == 0;
+
+  // TODO(vladl@google.com): Convert this to compile time assertion when it is
+  // available.
+  GTEST_CHECK_(sizeof(Integer) <= sizeof(parsed));
+
+  const Integer result = static_cast<Integer>(parsed);
+  if (parse_success && static_cast<BiggestConvertible>(result) == parsed) {
+    *number = result;
+    return true;
+  }
+  return false;
+}
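+// Illustrative sketch (not part of the original code) of ParseNaturalNumber's
+// contract, assuming a plain int destination:
+//
+//   int n = 0;
+//   ParseNaturalNumber(::std::string("123"), &n);  // true, n == 123
+//   ParseNaturalNumber(::std::string("-42"), &n);  // false: leading '-' rejected
+//   ParseNaturalNumber(::std::string("12x"), &n);  // false: trailing garbage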
+#endif  // GTEST_HAS_DEATH_TEST
+
+// TestResult contains some private methods that should be hidden from
+// Google Test users but are required for testing. This class allows our
+// tests to access them.
+//
+// This class is supplied only for the purpose of testing Google Test's own
+// constructs. Do not use it in user tests, either directly or indirectly.
+class TestResultAccessor {
+ public:
+  static void RecordProperty(TestResult* test_result,
+                             const std::string& xml_element,
+                             const TestProperty& property) {
+    test_result->RecordProperty(xml_element, property);
+  }
+
+  static void ClearTestPartResults(TestResult* test_result) {
+    test_result->ClearTestPartResults();
+  }
+
+  static const std::vector<testing::TestPartResult>& test_part_results(
+      const TestResult& test_result) {
+    return test_result.test_part_results();
+  }
+};
+
+#if GTEST_CAN_STREAM_RESULTS_
+
+// Streams test results to the given port on the given host machine.
+class GTEST_API_ StreamingListener : public EmptyTestEventListener {
+ public:
+  // Abstract base class for writing strings to a socket.
+  class AbstractSocketWriter {
+   public:
+    virtual ~AbstractSocketWriter() {}
+
+    // Sends a string to the socket.
+    virtual void Send(const string& message) = 0;
+
+    // Closes the socket.
+    virtual void CloseConnection() {}
+
+    // Sends a string and a newline to the socket.
+    void SendLn(const string& message) {
+      Send(message + "\n");
+    }
+  };
+
+  // Concrete class for actually writing strings to a socket.
+  class SocketWriter : public AbstractSocketWriter {
+   public:
+    SocketWriter(const string& host, const string& port)
+        : sockfd_(-1), host_name_(host), port_num_(port) {
+      MakeConnection();
+    }
+
+    virtual ~SocketWriter() {
+      if (sockfd_ != -1)
+        CloseConnection();
+    }
+
+    // Sends a string to the socket.
+    virtual void Send(const string& message) {
+      GTEST_CHECK_(sockfd_ != -1)
+          << "Send() can be called only when there is a connection.";
+
+      const int len = static_cast<int>(message.length());
+      if (write(sockfd_, message.c_str(), len) != len) {
+        GTEST_LOG_(WARNING)
+            << "stream_result_to: failed to stream to "
+            << host_name_ << ":" << port_num_;
+      }
+    }
+
+   private:
+    // Creates a client socket and connects to the server.
+    void MakeConnection();
+
+    // Closes the socket.
+    void CloseConnection() {
+      GTEST_CHECK_(sockfd_ != -1)
+          << "CloseConnection() can be called only when there is a connection.";
+
+      close(sockfd_);
+      sockfd_ = -1;
+    }
+
+    int sockfd_;  // socket file descriptor
+    const string host_name_;
+    const string port_num_;
+
+    GTEST_DISALLOW_COPY_AND_ASSIGN_(SocketWriter);
+  };  // class SocketWriter
+
+  // Escapes '=', '&', '%', and '\n' characters in str as "%xx".
+  static string UrlEncode(const char* str);
+
+  StreamingListener(const string& host, const string& port)
+      : socket_writer_(new SocketWriter(host, port)) { Start(); }
+
+  explicit StreamingListener(AbstractSocketWriter* socket_writer)
+      : socket_writer_(socket_writer) { Start(); }
+
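+  // Informal illustration (not exhaustive): each event below is sent as a
+  // single line of '&'-separated key=value pairs.  For a test Foo in test
+  // case BarTest, the listener would stream lines such as
+  //   event=TestCaseStart&name=BarTest
+  //   event=TestStart&name=Foo
+  //   event=TestEnd&passed=1&elapsed_time=3ms
+  // where the elapsed time shown here is only a made-up example value.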
+  void OnTestProgramStart(const UnitTest& /* unit_test */) {
+    SendLn("event=TestProgramStart");
+  }
+
+  void OnTestProgramEnd(const UnitTest& unit_test) {
+    // Note that Google Test currently only reports elapsed time for each
+    // test iteration, not for the entire test program.
+    SendLn("event=TestProgramEnd&passed=" + FormatBool(unit_test.Passed()));
+
+    // Notify the streaming server to stop.
+    socket_writer_->CloseConnection();
+  }
+
+  void OnTestIterationStart(const UnitTest& /* unit_test */, int iteration) {
+    SendLn("event=TestIterationStart&iteration=" +
+           StreamableToString(iteration));
+  }
+
+  void OnTestIterationEnd(const UnitTest& unit_test, int /* iteration */) {
+    SendLn("event=TestIterationEnd&passed=" +
+           FormatBool(unit_test.Passed()) + "&elapsed_time=" +
+           StreamableToString(unit_test.elapsed_time()) + "ms");
+  }
+
+  void OnTestCaseStart(const TestCase& test_case) {
+    SendLn(std::string("event=TestCaseStart&name=") + test_case.name());
+  }
+
+  void OnTestCaseEnd(const TestCase& test_case) {
+    SendLn("event=TestCaseEnd&passed=" + FormatBool(test_case.Passed())
+           + "&elapsed_time=" + StreamableToString(test_case.elapsed_time())
+           + "ms");
+  }
+
+  void OnTestStart(const TestInfo& test_info) {
+    SendLn(std::string("event=TestStart&name=") + test_info.name());
+  }
+
+  void OnTestEnd(const TestInfo& test_info) {
+    SendLn("event=TestEnd&passed=" +
+           FormatBool((test_info.result())->Passed()) +
+           "&elapsed_time=" +
+           StreamableToString((test_info.result())->elapsed_time()) + "ms");
+  }
+
+  void OnTestPartResult(const TestPartResult& test_part_result) {
+    const char* file_name = test_part_result.file_name();
+    if (file_name == NULL)
+      file_name = "";
+    SendLn("event=TestPartResult&file=" + UrlEncode(file_name) +
+           "&line=" + StreamableToString(test_part_result.line_number()) +
+           "&message=" + UrlEncode(test_part_result.message()));
+  }
+
+ private:
+  // Sends the given message and a newline to the socket.
+  void SendLn(const string& message) { socket_writer_->SendLn(message); }
+
+  // Called at the start of streaming to notify the receiver what
+  // protocol we are using.
+  void Start() { SendLn("gtest_streaming_protocol_version=1.0"); }
+
+  string FormatBool(bool value) { return value ? "1" : "0"; }
+
+  const scoped_ptr<AbstractSocketWriter> socket_writer_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(StreamingListener);
+};  // class StreamingListener
+
+#endif  // GTEST_CAN_STREAM_RESULTS_
+
+}  // namespace internal
+}  // namespace testing
+
+#endif  // GTEST_SRC_GTEST_INTERNAL_INL_H_
+#undef GTEST_IMPLEMENTATION_
+
+#if GTEST_OS_WINDOWS
+# define vsnprintf _vsnprintf
+#endif  // GTEST_OS_WINDOWS
+
+namespace testing {
+
+using internal::CountIf;
+using internal::ForEach;
+using internal::GetElementOr;
+using internal::Shuffle;
+
+// Constants.
+
+// A test whose test case name or test name matches this filter is
+// disabled and not run.
+static const char kDisableTestFilter[] = "DISABLED_*:*/DISABLED_*";
+
+// A test case whose name matches this filter is considered a death
+// test case and will be run before test cases whose name doesn't
+// match this filter.
+static const char kDeathTestCaseFilter[] = "*DeathTest:*DeathTest/*";
+
+// A test filter that matches everything.
+static const char kUniversalFilter[] = "*";
+
+// The default output file for XML output.
+static const char kDefaultOutputFile[] = "test_detail.xml";
+
+// The environment variable name for the test shard index.
+static const char kTestShardIndex[] = "GTEST_SHARD_INDEX";
+// The environment variable name for the total number of test shards.
+static const char kTestTotalShards[] = "GTEST_TOTAL_SHARDS";
+// The environment variable name for the test shard status file.
+static const char kTestShardStatusFile[] = "GTEST_SHARD_STATUS_FILE";
+
+namespace internal {
+
+// The text used in failure messages to indicate the start of the
+// stack trace.
+const char kStackTraceMarker[] = "\nStack trace:\n";
+
+// g_help_flag is true iff the --help flag or an equivalent form is
+// specified on the command line.
+bool g_help_flag = false;
+
+}  // namespace internal
+
+static const char* GetDefaultFilter() {
+#ifdef GTEST_TEST_FILTER_ENV_VAR_
+  const char* const testbridge_test_only = getenv(GTEST_TEST_FILTER_ENV_VAR_);
+  if (testbridge_test_only != NULL) {
+    return testbridge_test_only;
+  }
+#endif  // GTEST_TEST_FILTER_ENV_VAR_
+  return kUniversalFilter;
+}
+
+GTEST_DEFINE_bool_(
+    also_run_disabled_tests,
+    internal::BoolFromGTestEnv("also_run_disabled_tests", false),
+    "Run disabled tests too, in addition to the tests normally being run.");
+
+GTEST_DEFINE_bool_(
+    break_on_failure,
+    internal::BoolFromGTestEnv("break_on_failure", false),
+    "True iff a failed assertion should be a debugger break-point.");
+
+GTEST_DEFINE_bool_(
+    catch_exceptions,
+    internal::BoolFromGTestEnv("catch_exceptions", true),
+    "True iff " GTEST_NAME_
+    " should catch exceptions and treat them as test failures.");
+
+GTEST_DEFINE_string_(
+    color,
+    internal::StringFromGTestEnv("color", "auto"),
+    "Whether to use colors in the output.  Valid values: yes, no, "
+    "and auto.  'auto' means to use colors if the output is "
+    "being sent to a terminal and the TERM environment variable "
+    "is set to a terminal type that supports colors.");
+
+GTEST_DEFINE_string_(
+    filter,
+    internal::StringFromGTestEnv("filter", GetDefaultFilter()),
+    "A colon-separated list of glob (not regex) patterns "
+    "for filtering the tests to run, optionally followed by a "
+    "'-' and a : separated list of negative patterns (tests to "
+    "exclude).  A test is run if it matches one of the positive "
+    "patterns and does not match any of the negative patterns.");
+
+GTEST_DEFINE_bool_(list_tests, false,
+                   "List all tests without running them.");
+
+GTEST_DEFINE_string_(
+    output,
+    internal::StringFromGTestEnv("output", ""),
+    "A format (currently must be \"xml\"), optionally followed "
+    "by a colon and an output file name or directory. A directory "
+    "is indicated by a trailing pathname separator. "
+    "Examples: \"xml:filename.xml\", \"xml::directoryname/\". "
+    "If a directory is specified, output files will be created "
+    "within that directory, with file-names based on the test "
+    "executable's name and, if necessary, made unique by adding "
+    "digits.");
+
+GTEST_DEFINE_bool_(
+    print_time,
+    internal::BoolFromGTestEnv("print_time", true),
+    "True iff " GTEST_NAME_
+    " should display elapsed time in text output.");
+
+GTEST_DEFINE_int32_(
+    random_seed,
+    internal::Int32FromGTestEnv("random_seed", 0),
+    "Random number seed to use when shuffling test orders.  Must be in range "
+    "[1, 99999], or 0 to use a seed based on the current time.");
+
+GTEST_DEFINE_int32_(
+    repeat,
+    internal::Int32FromGTestEnv("repeat", 1),
+    "How many times to repeat each test.  Specify a negative number "
+    "for repeating forever.  Useful for shaking out flaky tests.");
+
+GTEST_DEFINE_bool_(
+    show_internal_stack_frames, false,
+    "True iff " GTEST_NAME_ " should include internal stack frames when "
+    "printing test failure stack traces.");
+
+GTEST_DEFINE_bool_(
+    shuffle,
+    internal::BoolFromGTestEnv("shuffle", false),
+    "True iff " GTEST_NAME_
+    " should randomize tests' order on every run.");
+
+GTEST_DEFINE_int32_(
+    stack_trace_depth,
+    internal::Int32FromGTestEnv("stack_trace_depth", kMaxStackTraceDepth),
+    "The maximum number of stack frames to print when an "
+    "assertion fails.  The valid range is 0 through 100, inclusive.");
+
+GTEST_DEFINE_string_(
+    stream_result_to,
+    internal::StringFromGTestEnv("stream_result_to", ""),
+    "This flag specifies the host name and the port number on which to stream "
+    "test results. Example: \"localhost:555\". The flag is effective only on "
+    "Linux.");
+
+GTEST_DEFINE_bool_(
+    throw_on_failure,
+    internal::BoolFromGTestEnv("throw_on_failure", false),
+    "When this flag is specified, a failed assertion will throw an exception "
+    "if exceptions are enabled or exit the program with a non-zero code "
+    "otherwise.");
+
+#if GTEST_USE_OWN_FLAGFILE_FLAG_
+GTEST_DEFINE_string_(
+    flagfile,
+    internal::StringFromGTestEnv("flagfile", ""),
+    "This flag specifies the flagfile to read command-line flags from.");
+#endif  // GTEST_USE_OWN_FLAGFILE_FLAG_
+
+namespace internal {
+
+// Generates a random number from [0, range), using a Linear
+// Congruential Generator (LCG).  Crashes if 'range' is 0 or greater
+// than kMaxRange.
+UInt32 Random::Generate(UInt32 range) {
+  // These constants are the same as are used in glibc's rand(3).
+  state_ = (1103515245U*state_ + 12345U) % kMaxRange;
+
+  GTEST_CHECK_(range > 0)
+      << "Cannot generate a number in the range [0, 0).";
+  GTEST_CHECK_(range <= kMaxRange)
+      << "Generation of a number in [0, " << range << ") was requested, "
+      << "but this can only generate numbers in [0, " << kMaxRange << ").";
+
+  // Converting via modulus introduces a bit of downward bias, but
+  // it's simple, and a linear congruential generator isn't too good
+  // to begin with.
+  return state_ % range;
+}
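+// For example (informal), after Generate(6) with an initial state of 1 the
+// state becomes (1103515245*1 + 12345) % kMaxRange and the value returned is
+// that new state modulo 6, i.e. some number in [0, 6).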
+
+// GTestIsInitialized() returns true iff the user has initialized
+// Google Test.  Useful for catching the user mistake of not initializing
+// Google Test before calling RUN_ALL_TESTS().
+static bool GTestIsInitialized() { return GetArgvs().size() > 0; }
+
+// Iterates over a vector of TestCases, keeping a running sum of the
+// results of calling a given int-returning method on each.
+// Returns the sum.
+static int SumOverTestCaseList(const std::vector<TestCase*>& case_list,
+                               int (TestCase::*method)() const) {
+  int sum = 0;
+  for (size_t i = 0; i < case_list.size(); i++) {
+    sum += (case_list[i]->*method)();
+  }
+  return sum;
+}
+
+// Returns true iff the test case passed.
+static bool TestCasePassed(const TestCase* test_case) {
+  return test_case->should_run() && test_case->Passed();
+}
+
+// Returns true iff the test case failed.
+static bool TestCaseFailed(const TestCase* test_case) {
+  return test_case->should_run() && test_case->Failed();
+}
+
+// Returns true iff test_case contains at least one test that should
+// run.
+static bool ShouldRunTestCase(const TestCase* test_case) {
+  return test_case->should_run();
+}
+
+// AssertHelper constructor.
+AssertHelper::AssertHelper(TestPartResult::Type type,
+                           const char* file,
+                           int line,
+                           const char* message)
+    : data_(new AssertHelperData(type, file, line, message)) {
+}
+
+AssertHelper::~AssertHelper() {
+  delete data_;
+}
+
+// Message assignment, for assertion streaming support.
+void AssertHelper::operator=(const Message& message) const {
+  UnitTest::GetInstance()->
+    AddTestPartResult(data_->type, data_->file, data_->line,
+                      AppendUserMessage(data_->message, message),
+                      UnitTest::GetInstance()->impl()
+                      ->CurrentOsStackTraceExceptTop(1)
+                      // Skips the stack frame for this function itself.
+                      );  // NOLINT
+}
+
+// Mutex for linked pointers.
+GTEST_API_ GTEST_DEFINE_STATIC_MUTEX_(g_linked_ptr_mutex);
+
+// A copy of all command line arguments.  Set by InitGoogleTest().
+::std::vector<testing::internal::string> g_argvs;
+
+const ::std::vector<testing::internal::string>& GetArgvs() {
+#if defined(GTEST_CUSTOM_GET_ARGVS_)
+  return GTEST_CUSTOM_GET_ARGVS_();
+#else  // defined(GTEST_CUSTOM_GET_ARGVS_)
+  return g_argvs;
+#endif  // defined(GTEST_CUSTOM_GET_ARGVS_)
+}
+
+// Returns the current application's name, removing directory path if that
+// is present.
+FilePath GetCurrentExecutableName() {
+  FilePath result;
+
+#if GTEST_OS_WINDOWS
+  result.Set(FilePath(GetArgvs()[0]).RemoveExtension("exe"));
+#else
+  result.Set(FilePath(GetArgvs()[0]));
+#endif  // GTEST_OS_WINDOWS
+
+  return result.RemoveDirectoryName();
+}
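+// For example (informal): an argv[0] of "/usr/bin/foo_test" yields "foo_test",
+// and on Windows "C:\tests\foo_test.exe" also yields "foo_test" because the
+// ".exe" extension is stripped before the directory part is removed.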
+
+// Functions for processing the gtest_output flag.
+
+// Returns the output format, or "" for normal printed output.
+std::string UnitTestOptions::GetOutputFormat() {
+  const char* const gtest_output_flag = GTEST_FLAG(output).c_str();
+  if (gtest_output_flag == NULL) return std::string("");
+
+  const char* const colon = strchr(gtest_output_flag, ':');
+  return (colon == NULL) ?
+      std::string(gtest_output_flag) :
+      std::string(gtest_output_flag, colon - gtest_output_flag);
+}
+
+// Returns the name of the requested output file, or the default if none
+// was explicitly specified.
+std::string UnitTestOptions::GetAbsolutePathToOutputFile() {
+  const char* const gtest_output_flag = GTEST_FLAG(output).c_str();
+  if (gtest_output_flag == NULL)
+    return "";
+
+  const char* const colon = strchr(gtest_output_flag, ':');
+  if (colon == NULL)
+    return internal::FilePath::ConcatPaths(
+        internal::FilePath(
+            UnitTest::GetInstance()->original_working_dir()),
+        internal::FilePath(kDefaultOutputFile)).string();
+
+  internal::FilePath output_name(colon + 1);
+  if (!output_name.IsAbsolutePath())
+    // TODO(wan@google.com): on Windows \some\path is not an absolute
+    // path (as its meaning depends on the current drive), yet the
+    // following logic for turning it into an absolute path is wrong.
+    // Fix it.
+    output_name = internal::FilePath::ConcatPaths(
+        internal::FilePath(UnitTest::GetInstance()->original_working_dir()),
+        internal::FilePath(colon + 1));
+
+  if (!output_name.IsDirectory())
+    return output_name.string();
+
+  internal::FilePath result(internal::FilePath::GenerateUniqueFileName(
+      output_name, internal::GetCurrentExecutableName(),
+      GetOutputFormat().c_str()));
+  return result.string();
+}
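+// Informal examples of the flag handling above (relative paths are resolved
+// against the original working directory):
+//   --gtest_output=xml             -> test_detail.xml in the working directory
+//   --gtest_output=xml:report.xml  -> report.xml in the working directory
+//   --gtest_output=xml:reports/    -> a uniquely named file under reports/,
+//                                     derived from the executable's name.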
+
+// Returns true iff the wildcard pattern matches the string.  The
+// first ':' or '\0' character in pattern marks the end of it.
+//
+// This recursive algorithm isn't very efficient, but is clear and
+// works well enough for matching test names, which are short.
+bool UnitTestOptions::PatternMatchesString(const char *pattern,
+                                           const char *str) {
+  switch (*pattern) {
+    case '\0':
+    case ':':  // Either ':' or '\0' marks the end of the pattern.
+      return *str == '\0';
+    case '?':  // Matches any single character.
+      return *str != '\0' && PatternMatchesString(pattern + 1, str + 1);
+    case '*':  // Matches any string (possibly empty) of characters.
+      return (*str != '\0' && PatternMatchesString(pattern, str + 1)) ||
+          PatternMatchesString(pattern + 1, str);
+    default:  // Non-special character.  Matches itself.
+      return *pattern == *str &&
+          PatternMatchesString(pattern + 1, str + 1);
+  }
+}
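+// A few informal examples of the matching rules above:
+//   PatternMatchesString("*", "anything")     // true
+//   PatternMatchesString("a?c", "abc")        // true ('?' matches one char)
+//   PatternMatchesString("a*c:other", "abc")  // true (':' ends the pattern)
+//   PatternMatchesString("abc", "abcd")       // false (no trailing wildcard)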
+
+bool UnitTestOptions::MatchesFilter(
+    const std::string& name, const char* filter) {
+  const char *cur_pattern = filter;
+  for (;;) {
+    if (PatternMatchesString(cur_pattern, name.c_str())) {
+      return true;
+    }
+
+    // Finds the next pattern in the filter.
+    cur_pattern = strchr(cur_pattern, ':');
+
+    // Returns if no more pattern can be found.
+    if (cur_pattern == NULL) {
+      return false;
+    }
+
+    // Skips the pattern separator (the ':' character).
+    cur_pattern++;
+  }
+}
+
+// Returns true iff the user-specified filter matches the test case
+// name and the test name.
+bool UnitTestOptions::FilterMatchesTest(const std::string &test_case_name,
+                                        const std::string &test_name) {
+  const std::string full_name = test_case_name + "." + test_name;
+
+  // Split --gtest_filter at '-', if there is one, to separate into
+  // positive filter and negative filter portions.
+  const char* const p = GTEST_FLAG(filter).c_str();
+  const char* const dash = strchr(p, '-');
+  std::string positive;
+  std::string negative;
+  if (dash == NULL) {
+    positive = GTEST_FLAG(filter).c_str();  // Whole string is a positive filter
+    negative = "";
+  } else {
+    positive = std::string(p, dash);   // Everything up to the dash
+    negative = std::string(dash + 1);  // Everything after the dash
+    if (positive.empty()) {
+      // Treat '-test1' as the same as '*-test1'
+      positive = kUniversalFilter;
+    }
+  }
+
+  // A filter is a colon-separated list of patterns.  It matches a
+  // test if any pattern in it matches the test.
+  return (MatchesFilter(full_name, positive.c_str()) &&
+          !MatchesFilter(full_name, negative.c_str()));
+}
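+// For example (informal), --gtest_filter=FooTest.*:BarTest.*-FooTest.Slow
+// selects every test in FooTest or BarTest except FooTest.Slow: the part
+// before '-' is the positive filter, the part after it is the negative one.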
+
+#if GTEST_HAS_SEH
+// Returns EXCEPTION_EXECUTE_HANDLER if Google Test should handle the
+// given SEH exception, or EXCEPTION_CONTINUE_SEARCH otherwise.
+// This function is useful as an __except condition.
+int UnitTestOptions::GTestShouldProcessSEH(DWORD exception_code) {
+  // Google Test should handle a SEH exception if:
+  //   1. the user wants it to, AND
+  //   2. this is not a breakpoint exception, AND
+  //   3. this is not a C++ exception (VC++ implements them via SEH,
+  //      apparently).
+  //
+  // SEH exception code for C++ exceptions.
+  // (see http://support.microsoft.com/kb/185294 for more information).
+  const DWORD kCxxExceptionCode = 0xe06d7363;
+
+  bool should_handle = true;
+
+  if (!GTEST_FLAG(catch_exceptions))
+    should_handle = false;
+  else if (exception_code == EXCEPTION_BREAKPOINT)
+    should_handle = false;
+  else if (exception_code == kCxxExceptionCode)
+    should_handle = false;
+
+  return should_handle ? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH;
+}
+#endif  // GTEST_HAS_SEH
+
+}  // namespace internal
+
+// The c'tor sets this object as the test part result reporter used by
+// Google Test.  The 'result' parameter specifies where to report the
+// results. Intercepts only failures from the current thread.
+ScopedFakeTestPartResultReporter::ScopedFakeTestPartResultReporter(
+    TestPartResultArray* result)
+    : intercept_mode_(INTERCEPT_ONLY_CURRENT_THREAD),
+      result_(result) {
+  Init();
+}
+
+// The c'tor sets this object as the test part result reporter used by
+// Google Test.  The 'result' parameter specifies where to report the
+// results.
+ScopedFakeTestPartResultReporter::ScopedFakeTestPartResultReporter(
+    InterceptMode intercept_mode, TestPartResultArray* result)
+    : intercept_mode_(intercept_mode),
+      result_(result) {
+  Init();
+}
+
+void ScopedFakeTestPartResultReporter::Init() {
+  internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
+  if (intercept_mode_ == INTERCEPT_ALL_THREADS) {
+    old_reporter_ = impl->GetGlobalTestPartResultReporter();
+    impl->SetGlobalTestPartResultReporter(this);
+  } else {
+    old_reporter_ = impl->GetTestPartResultReporterForCurrentThread();
+    impl->SetTestPartResultReporterForCurrentThread(this);
+  }
+}
+
+// The d'tor restores the test part result reporter used by Google Test
+// before.
+ScopedFakeTestPartResultReporter::~ScopedFakeTestPartResultReporter() {
+  internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
+  if (intercept_mode_ == INTERCEPT_ALL_THREADS) {
+    impl->SetGlobalTestPartResultReporter(old_reporter_);
+  } else {
+    impl->SetTestPartResultReporterForCurrentThread(old_reporter_);
+  }
+}
+
+// Increments the test part result count and remembers the result.
+// This method is from the TestPartResultReporterInterface interface.
+void ScopedFakeTestPartResultReporter::ReportTestPartResult(
+    const TestPartResult& result) {
+  result_->Append(result);
+}
+
+namespace internal {
+
+// Returns the type ID of ::testing::Test.  We should always call this
+// instead of GetTypeId< ::testing::Test>() to get the type ID of
+// testing::Test.  This is to work around a suspected linker bug when
+// using Google Test as a framework on Mac OS X.  The bug causes
+// GetTypeId< ::testing::Test>() to return different values depending
+// on whether the call is from the Google Test framework itself or
+// from user test code.  GetTestTypeId() is guaranteed to always
+// return the same value, as it always calls GetTypeId<>() from
+// gtest.cc, which is within the Google Test framework.
+TypeId GetTestTypeId() {
+  return GetTypeId<Test>();
+}
+
+// The value of GetTestTypeId() as seen from within the Google Test
+// library.  This is solely for testing GetTestTypeId().
+extern const TypeId kTestTypeIdInGoogleTest = GetTestTypeId();
+
+// This predicate-formatter checks that 'results' contains a test part
+// failure of the given type and that the failure message contains the
+// given substring.
+AssertionResult HasOneFailure(const char* /* results_expr */,
+                              const char* /* type_expr */,
+                              const char* /* substr_expr */,
+                              const TestPartResultArray& results,
+                              TestPartResult::Type type,
+                              const string& substr) {
+  const std::string expected(type == TestPartResult::kFatalFailure ?
+                        "1 fatal failure" :
+                        "1 non-fatal failure");
+  Message msg;
+  if (results.size() != 1) {
+    msg << "Expected: " << expected << "\n"
+        << "  Actual: " << results.size() << " failures";
+    for (int i = 0; i < results.size(); i++) {
+      msg << "\n" << results.GetTestPartResult(i);
+    }
+    return AssertionFailure() << msg;
+  }
+
+  const TestPartResult& r = results.GetTestPartResult(0);
+  if (r.type() != type) {
+    return AssertionFailure() << "Expected: " << expected << "\n"
+                              << "  Actual:\n"
+                              << r;
+  }
+
+  if (strstr(r.message(), substr.c_str()) == NULL) {
+    return AssertionFailure() << "Expected: " << expected << " containing \""
+                              << substr << "\"\n"
+                              << "  Actual:\n"
+                              << r;
+  }
+
+  return AssertionSuccess();
+}
+
+// The constructor of SingleFailureChecker remembers where to look up
+// test part results, what type of failure we expect, and what
+// substring the failure message should contain.
+SingleFailureChecker::SingleFailureChecker(
+    const TestPartResultArray* results,
+    TestPartResult::Type type,
+    const string& substr)
+    : results_(results),
+      type_(type),
+      substr_(substr) {}
+
+// The destructor of SingleFailureChecker verifies that the given
+// TestPartResultArray contains exactly one failure that has the given
+// type and contains the given substring.  If that's not the case, a
+// non-fatal failure will be generated.
+SingleFailureChecker::~SingleFailureChecker() {
+  EXPECT_PRED_FORMAT3(HasOneFailure, *results_, type_, substr_);
+}
+
+DefaultGlobalTestPartResultReporter::DefaultGlobalTestPartResultReporter(
+    UnitTestImpl* unit_test) : unit_test_(unit_test) {}
+
+void DefaultGlobalTestPartResultReporter::ReportTestPartResult(
+    const TestPartResult& result) {
+  unit_test_->current_test_result()->AddTestPartResult(result);
+  unit_test_->listeners()->repeater()->OnTestPartResult(result);
+}
+
+DefaultPerThreadTestPartResultReporter::DefaultPerThreadTestPartResultReporter(
+    UnitTestImpl* unit_test) : unit_test_(unit_test) {}
+
+void DefaultPerThreadTestPartResultReporter::ReportTestPartResult(
+    const TestPartResult& result) {
+  unit_test_->GetGlobalTestPartResultReporter()->ReportTestPartResult(result);
+}
+
+// Returns the global test part result reporter.
+TestPartResultReporterInterface*
+UnitTestImpl::GetGlobalTestPartResultReporter() {
+  internal::MutexLock lock(&global_test_part_result_reporter_mutex_);
+  return global_test_part_result_repoter_;
+}
+
+// Sets the global test part result reporter.
+void UnitTestImpl::SetGlobalTestPartResultReporter(
+    TestPartResultReporterInterface* reporter) {
+  internal::MutexLock lock(&global_test_part_result_reporter_mutex_);
+  global_test_part_result_repoter_ = reporter;
+}
+
+// Returns the test part result reporter for the current thread.
+TestPartResultReporterInterface*
+UnitTestImpl::GetTestPartResultReporterForCurrentThread() {
+  return per_thread_test_part_result_reporter_.get();
+}
+
+// Sets the test part result reporter for the current thread.
+void UnitTestImpl::SetTestPartResultReporterForCurrentThread(
+    TestPartResultReporterInterface* reporter) {
+  per_thread_test_part_result_reporter_.set(reporter);
+}
+
+// Gets the number of successful test cases.
+int UnitTestImpl::successful_test_case_count() const {
+  return CountIf(test_cases_, TestCasePassed);
+}
+
+// Gets the number of failed test cases.
+int UnitTestImpl::failed_test_case_count() const {
+  return CountIf(test_cases_, TestCaseFailed);
+}
+
+// Gets the number of all test cases.
+int UnitTestImpl::total_test_case_count() const {
+  return static_cast<int>(test_cases_.size());
+}
+
+// Gets the number of all test cases that contain at least one test
+// that should run.
+int UnitTestImpl::test_case_to_run_count() const {
+  return CountIf(test_cases_, ShouldRunTestCase);
+}
+
+// Gets the number of successful tests.
+int UnitTestImpl::successful_test_count() const {
+  return SumOverTestCaseList(test_cases_, &TestCase::successful_test_count);
+}
+
+// Gets the number of failed tests.
+int UnitTestImpl::failed_test_count() const {
+  return SumOverTestCaseList(test_cases_, &TestCase::failed_test_count);
+}
+
+// Gets the number of disabled tests that will be reported in the XML report.
+int UnitTestImpl::reportable_disabled_test_count() const {
+  return SumOverTestCaseList(test_cases_,
+                             &TestCase::reportable_disabled_test_count);
+}
+
+// Gets the number of disabled tests.
+int UnitTestImpl::disabled_test_count() const {
+  return SumOverTestCaseList(test_cases_, &TestCase::disabled_test_count);
+}
+
+// Gets the number of tests to be printed in the XML report.
+int UnitTestImpl::reportable_test_count() const {
+  return SumOverTestCaseList(test_cases_, &TestCase::reportable_test_count);
+}
+
+// Gets the number of all tests.
+int UnitTestImpl::total_test_count() const {
+  return SumOverTestCaseList(test_cases_, &TestCase::total_test_count);
+}
+
+// Gets the number of tests that should run.
+int UnitTestImpl::test_to_run_count() const {
+  return SumOverTestCaseList(test_cases_, &TestCase::test_to_run_count);
+}
+
+// Returns the current OS stack trace as an std::string.
+//
+// The maximum number of stack frames to be included is specified by
+// the gtest_stack_trace_depth flag.  The skip_count parameter
+// specifies the number of top frames to be skipped, which doesn't
+// count against the number of frames to be included.
+//
+// For example, if Foo() calls Bar(), which in turn calls
+// CurrentOsStackTraceExceptTop(1), Foo() will be included in the
+// trace but Bar() and CurrentOsStackTraceExceptTop() won't.
+std::string UnitTestImpl::CurrentOsStackTraceExceptTop(int skip_count) {
+  return os_stack_trace_getter()->CurrentStackTrace(
+      static_cast<int>(GTEST_FLAG(stack_trace_depth)),
+      skip_count + 1
+      // Skips the user-specified number of frames plus this function
+      // itself.
+      );  // NOLINT
+}
+
+// Returns the current time in milliseconds.
+TimeInMillis GetTimeInMillis() {
+#if GTEST_OS_WINDOWS_MOBILE || defined(__BORLANDC__)
+  // Difference between 1970-01-01 and 1601-01-01 in milliseconds.
+  // http://analogous.blogspot.com/2005/04/epoch.html
+  const TimeInMillis kJavaEpochToWinFileTimeDelta =
+    static_cast<TimeInMillis>(116444736UL) * 100000UL;
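+  // (116444736 * 100000 ms == 11,644,473,600 seconds, i.e. the offset
+  // between 1601-01-01 and 1970-01-01.)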
+  const DWORD kTenthMicrosInMilliSecond = 10000;
+
+  SYSTEMTIME now_systime;
+  FILETIME now_filetime;
+  ULARGE_INTEGER now_int64;
+  // TODO(kenton@google.com): Shouldn't this just use
+  //   GetSystemTimeAsFileTime()?
+  GetSystemTime(&now_systime);
+  if (SystemTimeToFileTime(&now_systime, &now_filetime)) {
+    now_int64.LowPart = now_filetime.dwLowDateTime;
+    now_int64.HighPart = now_filetime.dwHighDateTime;
+    now_int64.QuadPart = (now_int64.QuadPart / kTenthMicrosInMilliSecond) -
+      kJavaEpochToWinFileTimeDelta;
+    return now_int64.QuadPart;
+  }
+  return 0;
+#elif GTEST_OS_WINDOWS && !GTEST_HAS_GETTIMEOFDAY_
+  __timeb64 now;
+
+  // MSVC 8 deprecates _ftime64(), so we want to suppress warning 4996
+  // (deprecated function) there.
+  // TODO(kenton@google.com): Use GetTickCount()?  Or use
+  //   SystemTimeToFileTime()
+  GTEST_DISABLE_MSC_WARNINGS_PUSH_(4996)
+  _ftime64(&now);
+  GTEST_DISABLE_MSC_WARNINGS_POP_()
+
+  return static_cast<TimeInMillis>(now.time) * 1000 + now.millitm;
+#elif GTEST_HAS_GETTIMEOFDAY_
+  struct timeval now;
+  gettimeofday(&now, NULL);
+  return static_cast<TimeInMillis>(now.tv_sec) * 1000 + now.tv_usec / 1000;
+#else
+# error "Don't know how to get the current time on your system."
+#endif
+}
+
+// Utilities
+
+// class String.
+
+#if GTEST_OS_WINDOWS_MOBILE
+// Creates a UTF-16 wide string from the given ANSI string, allocating
+// memory using new. The caller is responsible for deleting the return
+// value using delete[]. Returns the wide string, or NULL if the
+// input is NULL.
+LPCWSTR String::AnsiToUtf16(const char* ansi) {
+  if (!ansi) return NULL;
+  const int length = strlen(ansi);
+  const int unicode_length =
+      MultiByteToWideChar(CP_ACP, 0, ansi, length,
+                          NULL, 0);
+  WCHAR* unicode = new WCHAR[unicode_length + 1];
+  MultiByteToWideChar(CP_ACP, 0, ansi, length,
+                      unicode, unicode_length);
+  unicode[unicode_length] = 0;
+  return unicode;
+}
+
+// Creates an ANSI string from the given wide string, allocating
+// memory using new. The caller is responsible for deleting the return
+// value using delete[]. Returns the ANSI string, or NULL if the
+// input is NULL.
+const char* String::Utf16ToAnsi(LPCWSTR utf16_str)  {
+  if (!utf16_str) return NULL;
+  const int ansi_length =
+      WideCharToMultiByte(CP_ACP, 0, utf16_str, -1,
+                          NULL, 0, NULL, NULL);
+  char* ansi = new char[ansi_length + 1];
+  WideCharToMultiByte(CP_ACP, 0, utf16_str, -1,
+                      ansi, ansi_length, NULL, NULL);
+  ansi[ansi_length] = 0;
+  return ansi;
+}
+
+#endif  // GTEST_OS_WINDOWS_MOBILE
+
+// Compares two C strings.  Returns true iff they have the same content.
+//
+// Unlike strcmp(), this function can handle NULL argument(s).  A NULL
+// C string is considered different from any non-NULL C string,
+// including the empty string.
+bool String::CStringEquals(const char * lhs, const char * rhs) {
+  if ( lhs == NULL ) return rhs == NULL;
+
+  if ( rhs == NULL ) return false;
+
+  return strcmp(lhs, rhs) == 0;
+}
+
+#if GTEST_HAS_STD_WSTRING || GTEST_HAS_GLOBAL_WSTRING
+
+// Converts an array of wide chars to a narrow string using the UTF-8
+// encoding, and streams the result to the given Message object.
+static void StreamWideCharsToMessage(const wchar_t* wstr, size_t length,
+                                     Message* msg) {
+  for (size_t i = 0; i != length; ) {  // NOLINT
+    if (wstr[i] != L'\0') {
+      *msg << WideStringToUtf8(wstr + i, static_cast<int>(length - i));
+      while (i != length && wstr[i] != L'\0')
+        i++;
+    } else {
+      *msg << '\0';
+      i++;
+    }
+  }
+}
+
+#endif  // GTEST_HAS_STD_WSTRING || GTEST_HAS_GLOBAL_WSTRING
+
+void SplitString(const ::std::string& str, char delimiter,
+                 ::std::vector< ::std::string>* dest) {
+  ::std::vector< ::std::string> parsed;
+  ::std::string::size_type pos = 0;
+  while (::testing::internal::AlwaysTrue()) {
+    const ::std::string::size_type colon = str.find(delimiter, pos);
+    if (colon == ::std::string::npos) {
+      parsed.push_back(str.substr(pos));
+      break;
+    } else {
+      parsed.push_back(str.substr(pos, colon - pos));
+      pos = colon + 1;
+    }
+  }
+  dest->swap(parsed);
+}
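+// For example (informal), SplitString("a:b::c", ':', &v) leaves v holding
+// {"a", "b", "", "c"}; adjacent delimiters yield empty entries.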
+
+}  // namespace internal
+
+// Constructs an empty Message.
+// We allocate the stringstream separately because otherwise each use of
+// ASSERT/EXPECT in a procedure adds over 200 bytes to the procedure's
+// stack frame, leading to huge stack frames in some cases; gcc does not reuse
+// the stack space.
+Message::Message() : ss_(new ::std::stringstream) {
+  // By default, we want there to be enough precision when printing
+  // a double to a Message.
+  *ss_ << std::setprecision(std::numeric_limits<double>::digits10 + 2);
+}
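+// (digits10 + 2 is 17 for IEEE-754 doubles, enough significant digits for a
+// printed double to round-trip to the same value.)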
+
+// These two overloads allow streaming a wide C string to a Message
+// using the UTF-8 encoding.
+Message& Message::operator <<(const wchar_t* wide_c_str) {
+  return *this << internal::String::ShowWideCString(wide_c_str);
+}
+Message& Message::operator <<(wchar_t* wide_c_str) {
+  return *this << internal::String::ShowWideCString(wide_c_str);
+}
+
+#if GTEST_HAS_STD_WSTRING
+// Converts the given wide string to a narrow string using the UTF-8
+// encoding, and streams the result to this Message object.
+Message& Message::operator <<(const ::std::wstring& wstr) {
+  internal::StreamWideCharsToMessage(wstr.c_str(), wstr.length(), this);
+  return *this;
+}
+#endif  // GTEST_HAS_STD_WSTRING
+
+#if GTEST_HAS_GLOBAL_WSTRING
+// Converts the given wide string to a narrow string using the UTF-8
+// encoding, and streams the result to this Message object.
+Message& Message::operator <<(const ::wstring& wstr) {
+  internal::StreamWideCharsToMessage(wstr.c_str(), wstr.length(), this);
+  return *this;
+}
+#endif  // GTEST_HAS_GLOBAL_WSTRING
+
+// Gets the text streamed to this object so far as an std::string.
+// Each '\0' character in the buffer is replaced with "\\0".
+std::string Message::GetString() const {
+  return internal::StringStreamToString(ss_.get());
+}
+
+// AssertionResult constructors.
+// Used in EXPECT_TRUE/FALSE(assertion_result).
+AssertionResult::AssertionResult(const AssertionResult& other)
+    : success_(other.success_),
+      message_(other.message_.get() != NULL ?
+               new ::std::string(*other.message_) :
+               static_cast< ::std::string*>(NULL)) {
+}
+
+// Swaps two AssertionResults.
+void AssertionResult::swap(AssertionResult& other) {
+  using std::swap;
+  swap(success_, other.success_);
+  swap(message_, other.message_);
+}
+
+// Returns the assertion's negation. Used with EXPECT/ASSERT_FALSE.
+AssertionResult AssertionResult::operator!() const {
+  AssertionResult negation(!success_);
+  if (message_.get() != NULL)
+    negation << *message_;
+  return negation;
+}
+
+// Makes a successful assertion result.
+AssertionResult AssertionSuccess() {
+  return AssertionResult(true);
+}
+
+// Makes a failed assertion result.
+AssertionResult AssertionFailure() {
+  return AssertionResult(false);
+}
+
+// Makes a failed assertion result with the given failure message.
+// Deprecated; use AssertionFailure() << message.
+AssertionResult AssertionFailure(const Message& message) {
+  return AssertionFailure() << message;
+}
+
+namespace internal {
+
+namespace edit_distance {
+std::vector<EditType> CalculateOptimalEdits(const std::vector<size_t>& left,
+                                            const std::vector<size_t>& right) {
+  std::vector<std::vector<double> > costs(
+      left.size() + 1, std::vector<double>(right.size() + 1));
+  std::vector<std::vector<EditType> > best_move(
+      left.size() + 1, std::vector<EditType>(right.size() + 1));
+
+  // Populate for empty right.
+  for (size_t l_i = 0; l_i < costs.size(); ++l_i) {
+    costs[l_i][0] = static_cast<double>(l_i);
+    best_move[l_i][0] = kRemove;
+  }
+  // Populate for empty left.
+  for (size_t r_i = 1; r_i < costs[0].size(); ++r_i) {
+    costs[0][r_i] = static_cast<double>(r_i);
+    best_move[0][r_i] = kAdd;
+  }
+
+  for (size_t l_i = 0; l_i < left.size(); ++l_i) {
+    for (size_t r_i = 0; r_i < right.size(); ++r_i) {
+      if (left[l_i] == right[r_i]) {
+        // Found a match. Consume it.
+        costs[l_i + 1][r_i + 1] = costs[l_i][r_i];
+        best_move[l_i + 1][r_i + 1] = kMatch;
+        continue;
+      }
+
+      const double add = costs[l_i + 1][r_i];
+      const double remove = costs[l_i][r_i + 1];
+      const double replace = costs[l_i][r_i];
+      if (add < remove && add < replace) {
+        costs[l_i + 1][r_i + 1] = add + 1;
+        best_move[l_i + 1][r_i + 1] = kAdd;
+      } else if (remove < add && remove < replace) {
+        costs[l_i + 1][r_i + 1] = remove + 1;
+        best_move[l_i + 1][r_i + 1] = kRemove;
+      } else {
+        // We make replace a little more expensive than add/remove to lower
+        // their priority.
+        costs[l_i + 1][r_i + 1] = replace + 1.00001;
+        best_move[l_i + 1][r_i + 1] = kReplace;
+      }
+    }
+  }
+
+  // Reconstruct the best path. We do it in reverse order.
+  std::vector<EditType> best_path;
+  for (size_t l_i = left.size(), r_i = right.size(); l_i > 0 || r_i > 0;) {
+    EditType move = best_move[l_i][r_i];
+    best_path.push_back(move);
+    l_i -= move != kAdd;
+    r_i -= move != kRemove;
+  }
+  std::reverse(best_path.begin(), best_path.end());
+  return best_path;
+}
+
+namespace {
+
+// Helper class to convert strings into ids with deduplication.
+class InternalStrings {
+ public:
+  size_t GetId(const std::string& str) {
+    IdMap::iterator it = ids_.find(str);
+    if (it != ids_.end()) return it->second;
+    size_t id = ids_.size();
+    return ids_[str] = id;
+  }
+
+ private:
+  typedef std::map<std::string, size_t> IdMap;
+  IdMap ids_;
+};
+
+}  // namespace
+
+std::vector<EditType> CalculateOptimalEdits(
+    const std::vector<std::string>& left,
+    const std::vector<std::string>& right) {
+  std::vector<size_t> left_ids, right_ids;
+  {
+    InternalStrings intern_table;
+    for (size_t i = 0; i < left.size(); ++i) {
+      left_ids.push_back(intern_table.GetId(left[i]));
+    }
+    for (size_t i = 0; i < right.size(); ++i) {
+      right_ids.push_back(intern_table.GetId(right[i]));
+    }
+  }
+  return CalculateOptimalEdits(left_ids, right_ids);
+}
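+// For example (informally traced through the DP above):
+//   CalculateOptimalEdits({"a", "b", "c"}, {"a", "c"})
+// yields {kMatch, kRemove, kMatch}: keep "a", drop "b", keep "c".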
+
+namespace {
+
+// Helper class that holds the state for one hunk and prints it out to the
+// stream.
+// It reorders adds/removes when possible to group all removes before all
+// adds. It also adds the hunk header before printing into the stream.
+class Hunk {
+ public:
+  Hunk(size_t left_start, size_t right_start)
+      : left_start_(left_start),
+        right_start_(right_start),
+        adds_(),
+        removes_(),
+        common_() {}
+
+  void PushLine(char edit, const char* line) {
+    switch (edit) {
+      case ' ':
+        ++common_;
+        FlushEdits();
+        hunk_.push_back(std::make_pair(' ', line));
+        break;
+      case '-':
+        ++removes_;
+        hunk_removes_.push_back(std::make_pair('-', line));
+        break;
+      case '+':
+        ++adds_;
+        hunk_adds_.push_back(std::make_pair('+', line));
+        break;
+    }
+  }
+
+  void PrintTo(std::ostream* os) {
+    PrintHeader(os);
+    FlushEdits();
+    for (std::list<std::pair<char, const char*> >::const_iterator it =
+             hunk_.begin();
+         it != hunk_.end(); ++it) {
+      *os << it->first << it->second << "\n";
+    }
+  }
+
+  bool has_edits() const { return adds_ || removes_; }
+
+ private:
+  void FlushEdits() {
+    hunk_.splice(hunk_.end(), hunk_removes_);
+    hunk_.splice(hunk_.end(), hunk_adds_);
+  }
+
+  // Print a unified diff header for one hunk.
+  // The format is
+  //   "@@ -<left_start>,<left_length> +<right_start>,<right_length> @@"
+  // where the left/right parts are omitted if unnecessary.
+  void PrintHeader(std::ostream* ss) const {
+    *ss << "@@ ";
+    if (removes_) {
+      *ss << "-" << left_start_ << "," << (removes_ + common_);
+    }
+    if (removes_ && adds_) {
+      *ss << " ";
+    }
+    if (adds_) {
+      *ss << "+" << right_start_ << "," << (adds_ + common_);
+    }
+    *ss << " @@\n";
+  }
+
+  size_t left_start_, right_start_;
+  size_t adds_, removes_, common_;
+  std::list<std::pair<char, const char*> > hunk_, hunk_adds_, hunk_removes_;
+};
+
+}  // namespace
+
+// Create a list of diff hunks in Unified diff format.
+// Each hunk has a header generated by PrintHeader above plus a body with
+// lines prefixed with ' ' for no change, '-' for deletion and '+' for
+// addition.
+// 'context' represents the desired unchanged prefix/suffix around the diff.
+// If two hunks are close enough that their contexts overlap, then they are
+// joined into one hunk.
+std::string CreateUnifiedDiff(const std::vector<std::string>& left,
+                              const std::vector<std::string>& right,
+                              size_t context) {
+  const std::vector<EditType> edits = CalculateOptimalEdits(left, right);
+
+  size_t l_i = 0, r_i = 0, edit_i = 0;
+  std::stringstream ss;
+  while (edit_i < edits.size()) {
+    // Find first edit.
+    while (edit_i < edits.size() && edits[edit_i] == kMatch) {
+      ++l_i;
+      ++r_i;
+      ++edit_i;
+    }
+
+    // Find the first line to include in the hunk.
+    const size_t prefix_context = std::min(l_i, context);
+    Hunk hunk(l_i - prefix_context + 1, r_i - prefix_context + 1);
+    for (size_t i = prefix_context; i > 0; --i) {
+      hunk.PushLine(' ', left[l_i - i].c_str());
+    }
+
+    // Iterate over the edits until we have found enough suffix for the hunk
+    // or the input is exhausted.
+    size_t n_suffix = 0;
+    for (; edit_i < edits.size(); ++edit_i) {
+      if (n_suffix >= context) {
+        // Continue only if the next hunk is very close.
+        std::vector<EditType>::const_iterator it = edits.begin() + edit_i;
+        while (it != edits.end() && *it == kMatch) ++it;
+        if (it == edits.end() || (it - edits.begin()) - edit_i >= context) {
+          // There is no next edit or it is too far away.
+          break;
+        }
+      }
+
+      EditType edit = edits[edit_i];
+      // Reset the count when a non-match is found.
+      n_suffix = edit == kMatch ? n_suffix + 1 : 0;
+
+      if (edit == kMatch || edit == kRemove || edit == kReplace) {
+        hunk.PushLine(edit == kMatch ? ' ' : '-', left[l_i].c_str());
+      }
+      if (edit == kAdd || edit == kReplace) {
+        hunk.PushLine('+', right[r_i].c_str());
+      }
+
+      // Advance indices, depending on edit type.
+      l_i += edit != kAdd;
+      r_i += edit != kRemove;
+    }
+
+    if (!hunk.has_edits()) {
+      // We are done. We don't want this hunk.
+      break;
+    }
+
+    hunk.PrintTo(&ss);
+  }
+  return ss.str();
+}
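+// For example (informally traced): CreateUnifiedDiff({"a", "b", "c"},
+// {"a", "c"}, 1) produces the single hunk
+//   @@ -1,3 @@
+//    a
+//   -b
+//    c
+// whose header mentions only the left range because the change has no adds.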
+
+}  // namespace edit_distance
+
+namespace {
+
+// The string representations of the values received in EqFailure() are already
+// escaped. Split them on escaped '\n' boundaries. Leave all other escaped
+// characters the same.
+std::vector<std::string> SplitEscapedString(const std::string& str) {
+  std::vector<std::string> lines;
+  size_t start = 0, end = str.size();
+  if (end > 2 && str[0] == '"' && str[end - 1] == '"') {
+    ++start;
+    --end;
+  }
+  bool escaped = false;
+  for (size_t i = start; i + 1 < end; ++i) {
+    if (escaped) {
+      escaped = false;
+      if (str[i] == 'n') {
+        lines.push_back(str.substr(start, i - start - 1));
+        start = i + 1;
+      }
+    } else {
+      escaped = str[i] == '\\';
+    }
+  }
+  lines.push_back(str.substr(start, end - start));
+  return lines;
+}
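+// For example (informal): SplitEscapedString("\"line1\\nline2\"") returns
+// {"line1", "line2"}; the surrounding quotes are stripped and the escaped
+// '\n' acts as the line separator.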
+
+}  // namespace
+
+// Constructs and returns the message for an equality assertion
+// (e.g. ASSERT_EQ, EXPECT_STREQ, etc) failure.
+//
+// The first four parameters are the expressions used in the assertion
+// and their values, as strings.  For example, for ASSERT_EQ(foo, bar)
+// where foo is 5 and bar is 6, we have:
+//
+//   lhs_expression: "foo"
+//   rhs_expression: "bar"
+//   lhs_value:      "5"
+//   rhs_value:      "6"
+//
+// The ignoring_case parameter is true iff the assertion is a
+// *_STRCASEEQ*.  When it's true, the string "Ignoring case" will
+// be inserted into the message.
+AssertionResult EqFailure(const char* lhs_expression,
+                          const char* rhs_expression,
+                          const std::string& lhs_value,
+                          const std::string& rhs_value,
+                          bool ignoring_case) {
+  Message msg;
+  msg << "      Expected: " << lhs_expression;
+  if (lhs_value != lhs_expression) {
+    msg << "\n      Which is: " << lhs_value;
+  }
+  msg << "\nTo be equal to: " << rhs_expression;
+  if (rhs_value != rhs_expression) {
+    msg << "\n      Which is: " << rhs_value;
+  }
+
+  if (ignoring_case) {
+    msg << "\nIgnoring case";
+  }
+
+  if (!lhs_value.empty() && !rhs_value.empty()) {
+    const std::vector<std::string> lhs_lines =
+        SplitEscapedString(lhs_value);
+    const std::vector<std::string> rhs_lines =
+        SplitEscapedString(rhs_value);
+    if (lhs_lines.size() > 1 || rhs_lines.size() > 1) {
+      msg << "\nWith diff:\n"
+          << edit_distance::CreateUnifiedDiff(lhs_lines, rhs_lines);
+    }
+  }
+
+  return AssertionFailure() << msg;
+}
+
+// Constructs a failure message for Boolean assertions such as EXPECT_TRUE.
+std::string GetBoolAssertionFailureMessage(
+    const AssertionResult& assertion_result,
+    const char* expression_text,
+    const char* actual_predicate_value,
+    const char* expected_predicate_value) {
+  const char* actual_message = assertion_result.message();
+  Message msg;
+  msg << "Value of: " << expression_text
+      << "\n  Actual: " << actual_predicate_value;
+  if (actual_message[0] != '\0')
+    msg << " (" << actual_message << ")";
+  msg << "\nExpected: " << expected_predicate_value;
+  return msg.GetString();
+}
+
+// Helper function for implementing ASSERT_NEAR.
+AssertionResult DoubleNearPredFormat(const char* expr1,
+                                     const char* expr2,
+                                     const char* abs_error_expr,
+                                     double val1,
+                                     double val2,
+                                     double abs_error) {
+  const double diff = fabs(val1 - val2);
+  if (diff <= abs_error) return AssertionSuccess();
+
+  // TODO(wan): do not print the value of an expression if it's
+  // already a literal.
+  return AssertionFailure()
+      << "The difference between " << expr1 << " and " << expr2
+      << " is " << diff << ", which exceeds " << abs_error_expr << ", where\n"
+      << expr1 << " evaluates to " << val1 << ",\n"
+      << expr2 << " evaluates to " << val2 << ", and\n"
+      << abs_error_expr << " evaluates to " << abs_error << ".";
+}
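+
+// This formatter backs ASSERT_NEAR/EXPECT_NEAR.  An illustrative use (not
+// taken from any test in this file):
+//
+//   EXPECT_NEAR(2.00001, 2.0, 1e-4);  // passes: the difference (~1e-5) <= 1e-4
+//   EXPECT_NEAR(2.00001, 2.0, 1e-6);  // fails with the message built above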
+
+
+// Helper template for implementing FloatLE() and DoubleLE().
+template <typename RawType>
+AssertionResult FloatingPointLE(const char* expr1,
+                                const char* expr2,
+                                RawType val1,
+                                RawType val2) {
+  // Returns success if val1 is less than val2,
+  if (val1 < val2) {
+    return AssertionSuccess();
+  }
+
+  // or if val1 is almost equal to val2.
+  const FloatingPoint<RawType> lhs(val1), rhs(val2);
+  if (lhs.AlmostEquals(rhs)) {
+    return AssertionSuccess();
+  }
+
+  // Note that the above two checks will both fail if either val1 or
+  // val2 is NaN, as the IEEE floating-point standard requires that
+  // any predicate involving a NaN must return false.
+
+  ::std::stringstream val1_ss;
+  val1_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
+          << val1;
+
+  ::std::stringstream val2_ss;
+  val2_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
+          << val2;
+
+  return AssertionFailure()
+      << "Expected: (" << expr1 << ") <= (" << expr2 << ")\n"
+      << "  Actual: " << StringStreamToString(&val1_ss) << " vs "
+      << StringStreamToString(&val2_ss);
+}
+
+}  // namespace internal
+
+// Asserts that val1 is less than, or almost equal to, val2.  Fails
+// otherwise.  In particular, it fails if either val1 or val2 is NaN.
+AssertionResult FloatLE(const char* expr1, const char* expr2,
+                        float val1, float val2) {
+  return internal::FloatingPointLE<float>(expr1, expr2, val1, val2);
+}
+
+// Asserts that val1 is less than, or almost equal to, val2.  Fails
+// otherwise.  In particular, it fails if either val1 or val2 is NaN.
+AssertionResult DoubleLE(const char* expr1, const char* expr2,
+                         double val1, double val2) {
+  return internal::FloatingPointLE<double>(expr1, expr2, val1, val2);
+}
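+
+// FloatLE()/DoubleLE() above are meant to be used with the
+// {EXPECT,ASSERT}_PRED_FORMAT2 macros, e.g. (illustrative only):
+//
+//   EXPECT_PRED_FORMAT2(::testing::DoubleLE, 1.0, 1.0 + 1e-20);  // passes
+//   EXPECT_PRED_FORMAT2(::testing::DoubleLE, 2.0, 1.0);          // fails
+//
+// The first check passes because 1.0 + 1e-20 rounds to exactly 1.0 in double
+// precision, so the two values compare almost-equal even though the first
+// argument is not strictly less than the second.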
+
+namespace internal {
+
+// The helper function for {ASSERT|EXPECT}_EQ with int or enum
+// arguments.
+AssertionResult CmpHelperEQ(const char* lhs_expression,
+                            const char* rhs_expression,
+                            BiggestInt lhs,
+                            BiggestInt rhs) {
+  if (lhs == rhs) {
+    return AssertionSuccess();
+  }
+
+  return EqFailure(lhs_expression,
+                   rhs_expression,
+                   FormatForComparisonFailureMessage(lhs, rhs),
+                   FormatForComparisonFailureMessage(rhs, lhs),
+                   false);
+}
+
+// A macro for implementing the helper functions needed to implement
+// ASSERT_?? and EXPECT_?? with integer or enum arguments.  It is here
+// just to avoid copy-and-paste of similar code.
+#define GTEST_IMPL_CMP_HELPER_(op_name, op)\
+AssertionResult CmpHelper##op_name(const char* expr1, const char* expr2, \
+                                   BiggestInt val1, BiggestInt val2) {\
+  if (val1 op val2) {\
+    return AssertionSuccess();\
+  } else {\
+    return AssertionFailure() \
+        << "Expected: (" << expr1 << ") " #op " (" << expr2\
+        << "), actual: " << FormatForComparisonFailureMessage(val1, val2)\
+        << " vs " << FormatForComparisonFailureMessage(val2, val1);\
+  }\
+}
+
+// Implements the helper function for {ASSERT|EXPECT}_NE with int or
+// enum arguments.
+GTEST_IMPL_CMP_HELPER_(NE, !=)
+// Implements the helper function for {ASSERT|EXPECT}_LE with int or
+// enum arguments.
+GTEST_IMPL_CMP_HELPER_(LE, <=)
+// Implements the helper function for {ASSERT|EXPECT}_LT with int or
+// enum arguments.
+GTEST_IMPL_CMP_HELPER_(LT, < )
+// Implements the helper function for {ASSERT|EXPECT}_GE with int or
+// enum arguments.
+GTEST_IMPL_CMP_HELPER_(GE, >=)
+// Implements the helper function for {ASSERT|EXPECT}_GT with int or
+// enum arguments.
+GTEST_IMPL_CMP_HELPER_(GT, > )
+
+#undef GTEST_IMPL_CMP_HELPER_
+
+// The helper function for {ASSERT|EXPECT}_STREQ.
+AssertionResult CmpHelperSTREQ(const char* lhs_expression,
+                               const char* rhs_expression,
+                               const char* lhs,
+                               const char* rhs) {
+  if (String::CStringEquals(lhs, rhs)) {
+    return AssertionSuccess();
+  }
+
+  return EqFailure(lhs_expression,
+                   rhs_expression,
+                   PrintToString(lhs),
+                   PrintToString(rhs),
+                   false);
+}
+
+// The helper function for {ASSERT|EXPECT}_STRCASEEQ.
+AssertionResult CmpHelperSTRCASEEQ(const char* lhs_expression,
+                                   const char* rhs_expression,
+                                   const char* lhs,
+                                   const char* rhs) {
+  if (String::CaseInsensitiveCStringEquals(lhs, rhs)) {
+    return AssertionSuccess();
+  }
+
+  return EqFailure(lhs_expression,
+                   rhs_expression,
+                   PrintToString(lhs),
+                   PrintToString(rhs),
+                   true);
+}
+
+// The helper function for {ASSERT|EXPECT}_STRNE.
+AssertionResult CmpHelperSTRNE(const char* s1_expression,
+                               const char* s2_expression,
+                               const char* s1,
+                               const char* s2) {
+  if (!String::CStringEquals(s1, s2)) {
+    return AssertionSuccess();
+  } else {
+    return AssertionFailure() << "Expected: (" << s1_expression << ") != ("
+                              << s2_expression << "), actual: \""
+                              << s1 << "\" vs \"" << s2 << "\"";
+  }
+}
+
+// The helper function for {ASSERT|EXPECT}_STRCASENE.
+AssertionResult CmpHelperSTRCASENE(const char* s1_expression,
+                                   const char* s2_expression,
+                                   const char* s1,
+                                   const char* s2) {
+  if (!String::CaseInsensitiveCStringEquals(s1, s2)) {
+    return AssertionSuccess();
+  } else {
+    return AssertionFailure()
+        << "Expected: (" << s1_expression << ") != ("
+        << s2_expression << ") (ignoring case), actual: \""
+        << s1 << "\" vs \"" << s2 << "\"";
+  }
+}
+
+}  // namespace internal
+
+namespace {
+
+// Helper functions for implementing IsSubString() and IsNotSubstring().
+
+// This group of overloaded functions return true iff needle is a
+// substring of haystack.  NULL is considered a substring of itself
+// only.
+
+bool IsSubstringPred(const char* needle, const char* haystack) {
+  if (needle == NULL || haystack == NULL)
+    return needle == haystack;
+
+  return strstr(haystack, needle) != NULL;
+}
+
+bool IsSubstringPred(const wchar_t* needle, const wchar_t* haystack) {
+  if (needle == NULL || haystack == NULL)
+    return needle == haystack;
+
+  return wcsstr(haystack, needle) != NULL;
+}
+
+// StringType here can be either ::std::string or ::std::wstring.
+template <typename StringType>
+bool IsSubstringPred(const StringType& needle,
+                     const StringType& haystack) {
+  return haystack.find(needle) != StringType::npos;
+}
+
+// This function implements either IsSubstring() or IsNotSubstring(),
+// depending on the value of the expected_to_be_substring parameter.
+// StringType here can be const char*, const wchar_t*, ::std::string,
+// or ::std::wstring.
+template <typename StringType>
+AssertionResult IsSubstringImpl(
+    bool expected_to_be_substring,
+    const char* needle_expr, const char* haystack_expr,
+    const StringType& needle, const StringType& haystack) {
+  if (IsSubstringPred(needle, haystack) == expected_to_be_substring)
+    return AssertionSuccess();
+
+  const bool is_wide_string = sizeof(needle[0]) > 1;
+  const char* const begin_string_quote = is_wide_string ? "L\"" : "\"";
+  return AssertionFailure()
+      << "Value of: " << needle_expr << "\n"
+      << "  Actual: " << begin_string_quote << needle << "\"\n"
+      << "Expected: " << (expected_to_be_substring ? "" : "not ")
+      << "a substring of " << haystack_expr << "\n"
+      << "Which is: " << begin_string_quote << haystack << "\"";
+}
+
+}  // namespace
+
+// IsSubstring() and IsNotSubstring() check whether needle is a
+// substring of haystack (NULL is considered a substring of itself
+// only), and return an appropriate error message when they fail.
+
+AssertionResult IsSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const char* needle, const char* haystack) {
+  return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack);
+}
+
+AssertionResult IsSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const wchar_t* needle, const wchar_t* haystack) {
+  return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack);
+}
+
+AssertionResult IsNotSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const char* needle, const char* haystack) {
+  return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack);
+}
+
+AssertionResult IsNotSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const wchar_t* needle, const wchar_t* haystack) {
+  return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack);
+}
+
+AssertionResult IsSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const ::std::string& needle, const ::std::string& haystack) {
+  return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack);
+}
+
+AssertionResult IsNotSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const ::std::string& needle, const ::std::string& haystack) {
+  return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack);
+}
+
+#if GTEST_HAS_STD_WSTRING
+AssertionResult IsSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const ::std::wstring& needle, const ::std::wstring& haystack) {
+  return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack);
+}
+
+AssertionResult IsNotSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const ::std::wstring& needle, const ::std::wstring& haystack) {
+  return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack);
+}
+#endif  // GTEST_HAS_STD_WSTRING
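+
+// These overloads are normally invoked through the predicate-formatter
+// macros, e.g. (illustrative only):
+//
+//   EXPECT_PRED_FORMAT2(::testing::IsSubstring, "needle", "hay needle hay");
+//   EXPECT_PRED_FORMAT2(::testing::IsNotSubstring, "needle", "plain hay");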
+
+namespace internal {
+
+#if GTEST_OS_WINDOWS
+
+namespace {
+
+// Helper function for the IsHRESULT{Success,Failure} predicates.
+AssertionResult HRESULTFailureHelper(const char* expr,
+                                     const char* expected,
+                                     long hr) {  // NOLINT
+# if GTEST_OS_WINDOWS_MOBILE
+
+  // Windows CE doesn't support FormatMessage.
+  const char error_text[] = "";
+
+# else
+
+  // Looks up the human-readable system message for the HRESULT code.
+  // Since we're not passing any params to FormatMessage, we don't
+  // want inserts expanded.
+  const DWORD kFlags = FORMAT_MESSAGE_FROM_SYSTEM |
+                       FORMAT_MESSAGE_IGNORE_INSERTS;
+  const DWORD kBufSize = 4096;
+  // Gets the system's human readable message string for this HRESULT.
+  char error_text[kBufSize] = { '\0' };
+  DWORD message_length = ::FormatMessageA(kFlags,
+                                          0,  // no source, we're asking system
+                                          hr,  // the error
+                                          0,  // no line width restrictions
+                                          error_text,  // output buffer
+                                          kBufSize,  // buf size
+                                          NULL);  // no arguments for inserts
+  // Trims trailing white space (FormatMessage leaves a trailing CR-LF)
+  for (; message_length && IsSpace(error_text[message_length - 1]);
+          --message_length) {
+    error_text[message_length - 1] = '\0';
+  }
+
+# endif  // GTEST_OS_WINDOWS_MOBILE
+
+  const std::string error_hex("0x" + String::FormatHexInt(hr));
+  return ::testing::AssertionFailure()
+      << "Expected: " << expr << " " << expected << ".\n"
+      << "  Actual: " << error_hex << " " << error_text << "\n";
+}
+
+}  // namespace
+
+AssertionResult IsHRESULTSuccess(const char* expr, long hr) {  // NOLINT
+  if (SUCCEEDED(hr)) {
+    return AssertionSuccess();
+  }
+  return HRESULTFailureHelper(expr, "succeeds", hr);
+}
+
+AssertionResult IsHRESULTFailure(const char* expr, long hr) {  // NOLINT
+  if (FAILED(hr)) {
+    return AssertionSuccess();
+  }
+  return HRESULTFailureHelper(expr, "fails", hr);
+}
+
+#endif  // GTEST_OS_WINDOWS
+
+// Utility functions for encoding Unicode text (wide strings) in
+// UTF-8.
+
+// A Unicode code-point can have up to 21 bits, and is encoded in UTF-8
+// like this:
+//
+// Code-point length   Encoding
+//   0 -  7 bits       0xxxxxxx
+//   8 - 11 bits       110xxxxx 10xxxxxx
+//  12 - 16 bits       1110xxxx 10xxxxxx 10xxxxxx
+//  17 - 21 bits       11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+
+// The maximum code-point a one-byte UTF-8 sequence can represent.
+const UInt32 kMaxCodePoint1 = (static_cast<UInt32>(1) <<  7) - 1;
+
+// The maximum code-point a two-byte UTF-8 sequence can represent.
+const UInt32 kMaxCodePoint2 = (static_cast<UInt32>(1) << (5 + 6)) - 1;
+
+// The maximum code-point a three-byte UTF-8 sequence can represent.
+const UInt32 kMaxCodePoint3 = (static_cast<UInt32>(1) << (4 + 2*6)) - 1;
+
+// The maximum code-point a four-byte UTF-8 sequence can represent.
+const UInt32 kMaxCodePoint4 = (static_cast<UInt32>(1) << (3 + 3*6)) - 1;
+
+// Chops off the n lowest bits from a bit pattern.  Returns the n
+// lowest bits.  As a side effect, the original bit pattern will be
+// shifted to the right by n bits.
+inline UInt32 ChopLowBits(UInt32* bits, int n) {
+  const UInt32 low_bits = *bits & ((static_cast<UInt32>(1) << n) - 1);
+  *bits >>= n;
+  return low_bits;
+}
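+
+// For example, with *bits == 0x3F5 (binary 1111110101), ChopLowBits(&bits, 6)
+// returns 0x35 (the low six bits 110101) and leaves *bits == 0xF.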
+
+// Converts a Unicode code point to a narrow string in UTF-8 encoding.
+// code_point parameter is of type UInt32 because wchar_t may not be
+// wide enough to contain a code point.
+// If the code_point is not a valid Unicode code point
+// (i.e. outside of Unicode range U+0 to U+10FFFF) it will be converted
+// to "(Invalid Unicode 0xXXXXXXXX)".
+std::string CodePointToUtf8(UInt32 code_point) {
+  if (code_point > kMaxCodePoint4) {
+    return "(Invalid Unicode 0x" + String::FormatHexInt(code_point) + ")";
+  }
+
+  char str[5];  // Big enough for the largest valid code point.
+  if (code_point <= kMaxCodePoint1) {
+    str[1] = '\0';
+    str[0] = static_cast<char>(code_point);                          // 0xxxxxxx
+  } else if (code_point <= kMaxCodePoint2) {
+    str[2] = '\0';
+    str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx
+    str[0] = static_cast<char>(0xC0 | code_point);                   // 110xxxxx
+  } else if (code_point <= kMaxCodePoint3) {
+    str[3] = '\0';
+    str[2] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx
+    str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx
+    str[0] = static_cast<char>(0xE0 | code_point);                   // 1110xxxx
+  } else {  // code_point <= kMaxCodePoint4
+    str[4] = '\0';
+    str[3] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx
+    str[2] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx
+    str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx
+    str[0] = static_cast<char>(0xF0 | code_point);                   // 11110xxx
+  }
+  return str;
+}
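+
+// Worked examples, following the encoding table above:
+//   CodePointToUtf8(0xE9)     returns "\xC3\xA9"          (two-byte form)
+//   CodePointToUtf8(0x1F600)  returns "\xF0\x9F\x98\x80"  (four-byte form)
+//   CodePointToUtf8(0x200000) returns "(Invalid Unicode 0x200000)"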
+
+// The following two functions only make sense if the system
+// uses UTF-16 for wide string encoding. All supported systems
+// with a 16-bit wchar_t (Windows, Cygwin, Symbian OS) do use UTF-16.
+
+// Determines if the arguments constitute UTF-16 surrogate pair
+// and thus should be combined into a single Unicode code point
+// using CreateCodePointFromUtf16SurrogatePair.
+inline bool IsUtf16SurrogatePair(wchar_t first, wchar_t second) {
+  return sizeof(wchar_t) == 2 &&
+      (first & 0xFC00) == 0xD800 && (second & 0xFC00) == 0xDC00;
+}
+
+// Creates a Unicode code point from UTF16 surrogate pair.
+inline UInt32 CreateCodePointFromUtf16SurrogatePair(wchar_t first,
+                                                    wchar_t second) {
+  const UInt32 mask = (1 << 10) - 1;
+  return (sizeof(wchar_t) == 2) ?
+      (((first & mask) << 10) | (second & mask)) + 0x10000 :
+      // This function should not be called when the condition is
+      // false, but we provide a sensible default in case it is.
+      static_cast<UInt32>(first);
+}
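+
+// For example, the surrogate pair (0xD83D, 0xDE00) decodes to code point
+// 0x1F600: (((0xD83D & 0x3FF) << 10) | (0xDE00 & 0x3FF)) + 0x10000 == 0x1F600.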
+
+// Converts a wide string to a narrow string in UTF-8 encoding.
+// The wide string is assumed to have the following encoding:
+//   UTF-16 if sizeof(wchar_t) == 2 (on Windows, Cygwin, Symbian OS)
+//   UTF-32 if sizeof(wchar_t) == 4 (on Linux)
+// Parameter str points to a null-terminated wide string.
+// Parameter num_chars may additionally limit the number
+// of wchar_t characters processed. -1 is used when the entire string
+// should be processed.
+// If the string contains code points that are not valid Unicode code points
+// (i.e. outside of Unicode range U+0 to U+10FFFF) they will be output
+// as '(Invalid Unicode 0xXXXXXXXX)'. If the string is in UTF-16 encoding
+// and contains invalid UTF-16 surrogate pairs, the values in those pairs
+// will be encoded as individual code points from the Basic Multilingual Plane.
+std::string WideStringToUtf8(const wchar_t* str, int num_chars) {
+  if (num_chars == -1)
+    num_chars = static_cast<int>(wcslen(str));
+
+  ::std::stringstream stream;
+  for (int i = 0; i < num_chars; ++i) {
+    UInt32 unicode_code_point;
+
+    if (str[i] == L'\0') {
+      break;
+    } else if (i + 1 < num_chars && IsUtf16SurrogatePair(str[i], str[i + 1])) {
+      unicode_code_point = CreateCodePointFromUtf16SurrogatePair(str[i],
+                                                                 str[i + 1]);
+      i++;
+    } else {
+      unicode_code_point = static_cast<UInt32>(str[i]);
+    }
+
+    stream << CodePointToUtf8(unicode_code_point);
+  }
+  return StringStreamToString(&stream);
+}
+
+// Converts a wide C string to an std::string using the UTF-8 encoding.
+// NULL will be converted to "(null)".
+std::string String::ShowWideCString(const wchar_t * wide_c_str) {
+  if (wide_c_str == NULL)  return "(null)";
+
+  return internal::WideStringToUtf8(wide_c_str, -1);
+}
+
+// Compares two wide C strings.  Returns true iff they have the same
+// content.
+//
+// Unlike wcscmp(), this function can handle NULL argument(s).  A NULL
+// C string is considered different to any non-NULL C string,
+// including the empty string.
+bool String::WideCStringEquals(const wchar_t * lhs, const wchar_t * rhs) {
+  if (lhs == NULL) return rhs == NULL;
+
+  if (rhs == NULL) return false;
+
+  return wcscmp(lhs, rhs) == 0;
+}
+
+// Helper function for *_STREQ on wide strings.
+AssertionResult CmpHelperSTREQ(const char* lhs_expression,
+                               const char* rhs_expression,
+                               const wchar_t* lhs,
+                               const wchar_t* rhs) {
+  if (String::WideCStringEquals(lhs, rhs)) {
+    return AssertionSuccess();
+  }
+
+  return EqFailure(lhs_expression,
+                   rhs_expression,
+                   PrintToString(lhs),
+                   PrintToString(rhs),
+                   false);
+}
+
+// Helper function for *_STRNE on wide strings.
+AssertionResult CmpHelperSTRNE(const char* s1_expression,
+                               const char* s2_expression,
+                               const wchar_t* s1,
+                               const wchar_t* s2) {
+  if (!String::WideCStringEquals(s1, s2)) {
+    return AssertionSuccess();
+  }
+
+  return AssertionFailure() << "Expected: (" << s1_expression << ") != ("
+                            << s2_expression << "), actual: "
+                            << PrintToString(s1)
+                            << " vs " << PrintToString(s2);
+}
+
+// Compares two C strings, ignoring case.  Returns true iff they have
+// the same content.
+//
+// Unlike strcasecmp(), this function can handle NULL argument(s).  A
+// NULL C string is considered different to any non-NULL C string,
+// including the empty string.
+bool String::CaseInsensitiveCStringEquals(const char * lhs, const char * rhs) {
+  if (lhs == NULL)
+    return rhs == NULL;
+  if (rhs == NULL)
+    return false;
+  return posix::StrCaseCmp(lhs, rhs) == 0;
+}
+
+// Compares two wide C strings, ignoring case.  Returns true iff they
+// have the same content.
+//
+// Unlike wcscasecmp(), this function can handle NULL argument(s).
+// A NULL C string is considered different to any non-NULL wide C string,
+// including the empty string.
+// NB: The implementations on different platforms differ slightly.
+// On Windows, this method uses _wcsicmp, which compares according to the
+// LC_CTYPE environment variable. On GNU platforms this method uses
+// wcscasecmp, which compares according to the LC_CTYPE category of the
+// current locale. On MacOS X, it uses towlower, which also uses the
+// LC_CTYPE category of the current locale.
+bool String::CaseInsensitiveWideCStringEquals(const wchar_t* lhs,
+                                              const wchar_t* rhs) {
+  if (lhs == NULL) return rhs == NULL;
+
+  if (rhs == NULL) return false;
+
+#if GTEST_OS_WINDOWS
+  return _wcsicmp(lhs, rhs) == 0;
+#elif GTEST_OS_LINUX && !GTEST_OS_LINUX_ANDROID
+  return wcscasecmp(lhs, rhs) == 0;
+#else
+  // Android, Mac OS X and Cygwin don't define wcscasecmp.
+  // Other unknown OSes may not define it either.
+  wint_t left, right;
+  do {
+    left = towlower(*lhs++);
+    right = towlower(*rhs++);
+  } while (left && left == right);
+  return left == right;
+#endif  // OS selector
+}
+
+// Returns true iff str ends with the given suffix, ignoring case.
+// Any string is considered to end with an empty suffix.
+bool String::EndsWithCaseInsensitive(
+    const std::string& str, const std::string& suffix) {
+  const size_t str_len = str.length();
+  const size_t suffix_len = suffix.length();
+  return (str_len >= suffix_len) &&
+         CaseInsensitiveCStringEquals(str.c_str() + str_len - suffix_len,
+                                      suffix.c_str());
+}
+
+// Formats an int value as "%02d".
+std::string String::FormatIntWidth2(int value) {
+  std::stringstream ss;
+  ss << std::setfill('0') << std::setw(2) << value;
+  return ss.str();
+}
+
+// Formats an int value as "%X".
+std::string String::FormatHexInt(int value) {
+  std::stringstream ss;
+  ss << std::hex << std::uppercase << value;
+  return ss.str();
+}
+
+// Formats a byte as "%02X".
+std::string String::FormatByte(unsigned char value) {
+  std::stringstream ss;
+  ss << std::setfill('0') << std::setw(2) << std::hex << std::uppercase
+     << static_cast<unsigned int>(value);
+  return ss.str();
+}
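+
+// Sample outputs of the formatting helpers above:
+//   String::FormatIntWidth2(7)  returns "07"
+//   String::FormatHexInt(255)   returns "FF"
+//   String::FormatByte(10)      returns "0A"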
+
+// Converts the buffer in a stringstream to an std::string, converting NUL
+// bytes to "\\0" along the way.
+std::string StringStreamToString(::std::stringstream* ss) {
+  const ::std::string& str = ss->str();
+  const char* const start = str.c_str();
+  const char* const end = start + str.length();
+
+  std::string result;
+  result.reserve(2 * (end - start));
+  for (const char* ch = start; ch != end; ++ch) {
+    if (*ch == '\0') {
+      result += "\\0";  // Replaces NUL with "\\0";
+    } else {
+      result += *ch;
+    }
+  }
+
+  return result;
+}
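+
+// For example, a stringstream holding the three characters 'a', '\0', 'b' is
+// converted to the four-character string "a\\0b", so embedded NULs remain
+// visible when the result is printed.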
+
+// Appends the user-supplied message to the Google-Test-generated message.
+std::string AppendUserMessage(const std::string& gtest_msg,
+                              const Message& user_msg) {
+  // Appends the user message if it's non-empty.
+  const std::string user_msg_string = user_msg.GetString();
+  if (user_msg_string.empty()) {
+    return gtest_msg;
+  }
+
+  return gtest_msg + "\n" + user_msg_string;
+}
+
+}  // namespace internal
+
+// class TestResult
+
+// Creates an empty TestResult.
+TestResult::TestResult()
+    : death_test_count_(0),
+      elapsed_time_(0) {
+}
+
+// D'tor.
+TestResult::~TestResult() {
+}
+
+// Returns the i-th test part result among all the results. i can
+// range from 0 to total_part_count() - 1. If i is not in that range,
+// aborts the program.
+const TestPartResult& TestResult::GetTestPartResult(int i) const {
+  if (i < 0 || i >= total_part_count())
+    internal::posix::Abort();
+  return test_part_results_.at(i);
+}
+
+// Returns the i-th test property. i can range from 0 to
+// test_property_count() - 1. If i is not in that range, aborts the
+// program.
+const TestProperty& TestResult::GetTestProperty(int i) const {
+  if (i < 0 || i >= test_property_count())
+    internal::posix::Abort();
+  return test_properties_.at(i);
+}
+
+// Clears the test part results.
+void TestResult::ClearTestPartResults() {
+  test_part_results_.clear();
+}
+
+// Adds a test part result to the list.
+void TestResult::AddTestPartResult(const TestPartResult& test_part_result) {
+  test_part_results_.push_back(test_part_result);
+}
+
+// Adds a test property to the list. If a property with the same key as the
+// supplied property is already represented, the value of this test_property
+// replaces the old value for that key.
+void TestResult::RecordProperty(const std::string& xml_element,
+                                const TestProperty& test_property) {
+  if (!ValidateTestProperty(xml_element, test_property)) {
+    return;
+  }
+  internal::MutexLock lock(&test_properites_mutex_);
+  const std::vector<TestProperty>::iterator property_with_matching_key =
+      std::find_if(test_properties_.begin(), test_properties_.end(),
+                   internal::TestPropertyKeyIs(test_property.key()));
+  if (property_with_matching_key == test_properties_.end()) {
+    test_properties_.push_back(test_property);
+    return;
+  }
+  property_with_matching_key->SetValue(test_property.value());
+}
+
+// The list of reserved attributes used in the <testsuites> element of XML
+// output.
+static const char* const kReservedTestSuitesAttributes[] = {
+  "disabled",
+  "errors",
+  "failures",
+  "name",
+  "random_seed",
+  "tests",
+  "time",
+  "timestamp"
+};
+
+// The list of reserved attributes used in the <testsuite> element of XML
+// output.
+static const char* const kReservedTestSuiteAttributes[] = {
+  "disabled",
+  "errors",
+  "failures",
+  "name",
+  "tests",
+  "time"
+};
+
+// The list of reserved attributes used in the <testcase> element of XML output.
+static const char* const kReservedTestCaseAttributes[] = {
+  "classname",
+  "name",
+  "status",
+  "time",
+  "type_param",
+  "value_param"
+};
+
+template <int kSize>
+std::vector<std::string> ArrayAsVector(const char* const (&array)[kSize]) {
+  return std::vector<std::string>(array, array + kSize);
+}
+
+static std::vector<std::string> GetReservedAttributesForElement(
+    const std::string& xml_element) {
+  if (xml_element == "testsuites") {
+    return ArrayAsVector(kReservedTestSuitesAttributes);
+  } else if (xml_element == "testsuite") {
+    return ArrayAsVector(kReservedTestSuiteAttributes);
+  } else if (xml_element == "testcase") {
+    return ArrayAsVector(kReservedTestCaseAttributes);
+  } else {
+    GTEST_CHECK_(false) << "Unrecognized xml_element provided: " << xml_element;
+  }
+  // This code is unreachable but some compilers may not realize that.
+  return std::vector<std::string>();
+}
+
+static std::string FormatWordList(const std::vector<std::string>& words) {
+  Message word_list;
+  for (size_t i = 0; i < words.size(); ++i) {
+    if (i > 0 && words.size() > 2) {
+      word_list << ", ";
+    }
+    if (i == words.size() - 1) {
+      word_list << "and ";
+    }
+    word_list << "'" << words[i] << "'";
+  }
+  return word_list.GetString();
+}
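+
+// For example, FormatWordList() applied to {"name", "status", "time"} yields
+// "'name', 'status', and 'time'".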
+
+bool ValidateTestPropertyName(const std::string& property_name,
+                              const std::vector<std::string>& reserved_names) {
+  if (std::find(reserved_names.begin(), reserved_names.end(), property_name) !=
+          reserved_names.end()) {
+    ADD_FAILURE() << "Reserved key used in RecordProperty(): " << property_name
+                  << " (" << FormatWordList(reserved_names)
+                  << " are reserved by " << GTEST_NAME_ << ")";
+    return false;
+  }
+  return true;
+}
+
+// Adds a failure if the key is a reserved attribute of the element named
+// xml_element.  Returns true if the property is valid.
+bool TestResult::ValidateTestProperty(const std::string& xml_element,
+                                      const TestProperty& test_property) {
+  return ValidateTestPropertyName(test_property.key(),
+                                  GetReservedAttributesForElement(xml_element));
+}
+
+// Clears the object.
+void TestResult::Clear() {
+  test_part_results_.clear();
+  test_properties_.clear();
+  death_test_count_ = 0;
+  elapsed_time_ = 0;
+}
+
+// Returns true iff the test failed.
+bool TestResult::Failed() const {
+  for (int i = 0; i < total_part_count(); ++i) {
+    if (GetTestPartResult(i).failed())
+      return true;
+  }
+  return false;
+}
+
+// Returns true iff the test part fatally failed.
+static bool TestPartFatallyFailed(const TestPartResult& result) {
+  return result.fatally_failed();
+}
+
+// Returns true iff the test fatally failed.
+bool TestResult::HasFatalFailure() const {
+  return CountIf(test_part_results_, TestPartFatallyFailed) > 0;
+}
+
+// Returns true iff the test part non-fatally failed.
+static bool TestPartNonfatallyFailed(const TestPartResult& result) {
+  return result.nonfatally_failed();
+}
+
+// Returns true iff the test has a non-fatal failure.
+bool TestResult::HasNonfatalFailure() const {
+  return CountIf(test_part_results_, TestPartNonfatallyFailed) > 0;
+}
+
+// Gets the number of all test parts.  This is the sum of the number
+// of successful test parts and the number of failed test parts.
+int TestResult::total_part_count() const {
+  return static_cast<int>(test_part_results_.size());
+}
+
+// Returns the number of the test properties.
+int TestResult::test_property_count() const {
+  return static_cast<int>(test_properties_.size());
+}
+
+// class Test
+
+// Creates a Test object.
+
+// The c'tor saves the states of all flags.
+Test::Test()
+    : gtest_flag_saver_(new GTEST_FLAG_SAVER_) {
+}
+
+// The d'tor restores the states of all flags.  The actual work is
+// done by the d'tor of the gtest_flag_saver_ field, and thus not
+// visible here.
+Test::~Test() {
+}
+
+// Sets up the test fixture.
+//
+// A sub-class may override this.
+void Test::SetUp() {
+}
+
+// Tears down the test fixture.
+//
+// A sub-class may override this.
+void Test::TearDown() {
+}
+
+// Allows user supplied key value pairs to be recorded for later output.
+void Test::RecordProperty(const std::string& key, const std::string& value) {
+  UnitTest::GetInstance()->RecordProperty(key, value);
+}
+
+// Allows user supplied key value pairs to be recorded for later output.
+void Test::RecordProperty(const std::string& key, int value) {
+  Message value_message;
+  value_message << value;
+  RecordProperty(key, value_message.GetString().c_str());
+}
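+
+// An illustrative use inside a test body (not taken from any test here):
+//
+//   TEST(WidgetTest, Capacity) {
+//     RecordProperty("MaximumWidgets", 12);  // attached to this <testcase>
+//     ...
+//   }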
+
+namespace internal {
+
+void ReportFailureInUnknownLocation(TestPartResult::Type result_type,
+                                    const std::string& message) {
+  // This function is a friend of UnitTest and as such has access to
+  // AddTestPartResult.
+  UnitTest::GetInstance()->AddTestPartResult(
+      result_type,
+      NULL,  // No info about the source file where the exception occurred.
+      -1,    // We have no info on which line caused the exception.
+      message,
+      "");   // No stack trace, either.
+}
+
+}  // namespace internal
+
+// Google Test requires all tests in the same test case to use the same test
+// fixture class.  This function checks if the current test has the
+// same fixture class as the first test in the current test case.  If
+// yes, it returns true; otherwise it generates a Google Test failure and
+// returns false.
+bool Test::HasSameFixtureClass() {
+  internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
+  const TestCase* const test_case = impl->current_test_case();
+
+  // Info about the first test in the current test case.
+  const TestInfo* const first_test_info = test_case->test_info_list()[0];
+  const internal::TypeId first_fixture_id = first_test_info->fixture_class_id_;
+  const char* const first_test_name = first_test_info->name();
+
+  // Info about the current test.
+  const TestInfo* const this_test_info = impl->current_test_info();
+  const internal::TypeId this_fixture_id = this_test_info->fixture_class_id_;
+  const char* const this_test_name = this_test_info->name();
+
+  if (this_fixture_id != first_fixture_id) {
+    // Is the first test defined using TEST?
+    const bool first_is_TEST = first_fixture_id == internal::GetTestTypeId();
+    // Is this test defined using TEST?
+    const bool this_is_TEST = this_fixture_id == internal::GetTestTypeId();
+
+    if (first_is_TEST || this_is_TEST) {
+      // Both TEST and TEST_F appear in same test case, which is incorrect.
+      // Tell the user how to fix this.
+
+      // Gets the name of the TEST and the name of the TEST_F.  Note
+      // that first_is_TEST and this_is_TEST cannot both be true, as
+      // the fixture IDs are different for the two tests.
+      const char* const TEST_name =
+          first_is_TEST ? first_test_name : this_test_name;
+      const char* const TEST_F_name =
+          first_is_TEST ? this_test_name : first_test_name;
+
+      ADD_FAILURE()
+          << "All tests in the same test case must use the same test fixture\n"
+          << "class, so mixing TEST_F and TEST in the same test case is\n"
+          << "illegal.  In test case " << this_test_info->test_case_name()
+          << ",\n"
+          << "test " << TEST_F_name << " is defined using TEST_F but\n"
+          << "test " << TEST_name << " is defined using TEST.  You probably\n"
+          << "want to change the TEST to TEST_F or move it to another test\n"
+          << "case.";
+    } else {
+      // Two fixture classes with the same name appear in two different
+      // namespaces, which is not allowed. Tell the user how to fix this.
+      ADD_FAILURE()
+          << "All tests in the same test case must use the same test fixture\n"
+          << "class.  However, in test case "
+          << this_test_info->test_case_name() << ",\n"
+          << "you defined test " << first_test_name
+          << " and test " << this_test_name << "\n"
+          << "using two different test fixture classes.  This can happen if\n"
+          << "the two classes are from different namespaces or translation\n"
+          << "units and have the same name.  You should probably rename one\n"
+          << "of the classes to put the tests into different test cases.";
+    }
+    return false;
+  }
+
+  return true;
+}
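+
+// For example, the following mix inside one test case triggers the first
+// failure above (illustrative only):
+//
+//   class FooTest : public ::testing::Test {};
+//   TEST_F(FooTest, UsesFixture) {}
+//   TEST(FooTest, NoFixture) {}  // same test case name, different fixture id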
+
+#if GTEST_HAS_SEH
+
+// Adds an "exception thrown" fatal failure to the current test.  This
+// function returns its result via an output parameter pointer because VC++
+// prohibits creation of objects with destructors on stack in functions
+// using __try (see error C2712).
+static std::string* FormatSehExceptionMessage(DWORD exception_code,
+                                              const char* location) {
+  Message message;
+  message << "SEH exception with code 0x" << std::setbase(16) <<
+    exception_code << std::setbase(10) << " thrown in " << location << ".";
+
+  return new std::string(message.GetString());
+}
+
+#endif  // GTEST_HAS_SEH
+
+namespace internal {
+
+#if GTEST_HAS_EXCEPTIONS
+
+// Adds an "exception thrown" fatal failure to the current test.
+static std::string FormatCxxExceptionMessage(const char* description,
+                                             const char* location) {
+  Message message;
+  if (description != NULL) {
+    message << "C++ exception with description \"" << description << "\"";
+  } else {
+    message << "Unknown C++ exception";
+  }
+  message << " thrown in " << location << ".";
+
+  return message.GetString();
+}
+
+static std::string PrintTestPartResultToString(
+    const TestPartResult& test_part_result);
+
+GoogleTestFailureException::GoogleTestFailureException(
+    const TestPartResult& failure)
+    : ::std::runtime_error(PrintTestPartResultToString(failure).c_str()) {}
+
+#endif  // GTEST_HAS_EXCEPTIONS
+
+// We put these helper functions in the internal namespace as IBM's xlC
+// compiler rejects the code if they are declared static.
+
+// Runs the given method and handles SEH exceptions it throws, when
+// SEH is supported; returns the 0-value for type Result in case of an
+// SEH exception.  (Microsoft compilers cannot handle SEH and C++
+// exceptions in the same function.  Therefore, we provide a separate
+// wrapper function for handling SEH exceptions.)
+template <class T, typename Result>
+Result HandleSehExceptionsInMethodIfSupported(
+    T* object, Result (T::*method)(), const char* location) {
+#if GTEST_HAS_SEH
+  __try {
+    return (object->*method)();
+  } __except (internal::UnitTestOptions::GTestShouldProcessSEH(  // NOLINT
+      GetExceptionCode())) {
+    // We create the exception message on the heap because VC++ prohibits
+    // creation of objects with destructors on stack in functions using __try
+    // (see error C2712).
+    std::string* exception_message = FormatSehExceptionMessage(
+        GetExceptionCode(), location);
+    internal::ReportFailureInUnknownLocation(TestPartResult::kFatalFailure,
+                                             *exception_message);
+    delete exception_message;
+    return static_cast<Result>(0);
+  }
+#else
+  (void)location;
+  return (object->*method)();
+#endif  // GTEST_HAS_SEH
+}
+
+// Runs the given method and catches and reports C++ and/or SEH-style
+// exceptions, if they are supported; returns the 0-value for type
+// Result in case of an SEH exception.
+template <class T, typename Result>
+Result HandleExceptionsInMethodIfSupported(
+    T* object, Result (T::*method)(), const char* location) {
+  // NOTE: The user code can affect the way in which Google Test handles
+  // exceptions by setting GTEST_FLAG(catch_exceptions), but only before
+  // RUN_ALL_TESTS() starts. It is technically possible to check the flag
+  // after the exception is caught and either report or re-throw the
+  // exception based on the flag's value:
+  //
+  // try {
+  //   // Perform the test method.
+  // } catch (...) {
+  //   if (GTEST_FLAG(catch_exceptions))
+  //     // Report the exception as failure.
+  //   else
+  //     throw;  // Re-throws the original exception.
+  // }
+  //
+  // However, the purpose of this flag is to allow the program to drop into
+  // the debugger when the exception is thrown. On most platforms, once the
+  // control enters the catch block, the exception origin information is
+  // lost and the debugger will stop the program at the point of the
+  // re-throw in this function -- instead of at the point of the original
+  // throw statement in the code under test.  For this reason, we perform
+  // the check early, sacrificing the ability to affect Google Test's
+  // exception handling in the method where the exception is thrown.
+  if (internal::GetUnitTestImpl()->catch_exceptions()) {
+#if GTEST_HAS_EXCEPTIONS
+    try {
+      return HandleSehExceptionsInMethodIfSupported(object, method, location);
+    } catch (const internal::GoogleTestFailureException&) {  // NOLINT
+      // This exception type can only be thrown by a failed Google
+      // Test assertion with the intention of letting another testing
+      // framework catch it.  Therefore we just re-throw it.
+      throw;
+    } catch (const std::exception& e) {  // NOLINT
+      internal::ReportFailureInUnknownLocation(
+          TestPartResult::kFatalFailure,
+          FormatCxxExceptionMessage(e.what(), location));
+    } catch (...) {  // NOLINT
+      internal::ReportFailureInUnknownLocation(
+          TestPartResult::kFatalFailure,
+          FormatCxxExceptionMessage(NULL, location));
+    }
+    return static_cast<Result>(0);
+#else
+    return HandleSehExceptionsInMethodIfSupported(object, method, location);
+#endif  // GTEST_HAS_EXCEPTIONS
+  } else {
+    return (object->*method)();
+  }
+}
+
+}  // namespace internal
+
+// Runs the test and updates the test result.
+void Test::Run() {
+  if (!HasSameFixtureClass()) return;
+
+  internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
+  impl->os_stack_trace_getter()->UponLeavingGTest();
+  internal::HandleExceptionsInMethodIfSupported(this, &Test::SetUp, "SetUp()");
+  // We will run the test only if SetUp() was successful.
+  if (!HasFatalFailure()) {
+    impl->os_stack_trace_getter()->UponLeavingGTest();
+    internal::HandleExceptionsInMethodIfSupported(
+        this, &Test::TestBody, "the test body");
+  }
+
+  // However, we want to clean up as much as possible.  Hence we will
+  // always call TearDown(), even if SetUp() or the test body has
+  // failed.
+  impl->os_stack_trace_getter()->UponLeavingGTest();
+  internal::HandleExceptionsInMethodIfSupported(
+      this, &Test::TearDown, "TearDown()");
+}
+
+// Returns true iff the current test has a fatal failure.
+bool Test::HasFatalFailure() {
+  return internal::GetUnitTestImpl()->current_test_result()->HasFatalFailure();
+}
+
+// Returns true iff the current test has a non-fatal failure.
+bool Test::HasNonfatalFailure() {
+  return internal::GetUnitTestImpl()->current_test_result()->
+      HasNonfatalFailure();
+}
+
+// class TestInfo
+
+// Constructs a TestInfo object. It assumes ownership of the test factory
+// object.
+TestInfo::TestInfo(const std::string& a_test_case_name,
+                   const std::string& a_name,
+                   const char* a_type_param,
+                   const char* a_value_param,
+                   internal::CodeLocation a_code_location,
+                   internal::TypeId fixture_class_id,
+                   internal::TestFactoryBase* factory)
+    : test_case_name_(a_test_case_name),
+      name_(a_name),
+      type_param_(a_type_param ? new std::string(a_type_param) : NULL),
+      value_param_(a_value_param ? new std::string(a_value_param) : NULL),
+      location_(a_code_location),
+      fixture_class_id_(fixture_class_id),
+      should_run_(false),
+      is_disabled_(false),
+      matches_filter_(false),
+      factory_(factory),
+      result_() {}
+
+// Destructs a TestInfo object.
+TestInfo::~TestInfo() { delete factory_; }
+
+namespace internal {
+
+// Creates a new TestInfo object and registers it with Google Test;
+// returns the created object.
+//
+// Arguments:
+//
+//   test_case_name:   name of the test case
+//   name:             name of the test
+//   type_param:       the name of the test's type parameter, or NULL if
+//                     this is not a typed or a type-parameterized test.
+//   value_param:      text representation of the test's value parameter,
+//                     or NULL if this is not a value-parameterized test.
+//   code_location:    code location where the test is defined
+//   fixture_class_id: ID of the test fixture class
+//   set_up_tc:        pointer to the function that sets up the test case
+//   tear_down_tc:     pointer to the function that tears down the test case
+//   factory:          pointer to the factory that creates a test object.
+//                     The newly created TestInfo instance will assume
+//                     ownership of the factory object.
+TestInfo* MakeAndRegisterTestInfo(
+    const char* test_case_name,
+    const char* name,
+    const char* type_param,
+    const char* value_param,
+    CodeLocation code_location,
+    TypeId fixture_class_id,
+    SetUpTestCaseFunc set_up_tc,
+    TearDownTestCaseFunc tear_down_tc,
+    TestFactoryBase* factory) {
+  TestInfo* const test_info =
+      new TestInfo(test_case_name, name, type_param, value_param,
+                   code_location, fixture_class_id, factory);
+  GetUnitTestImpl()->AddTestInfo(set_up_tc, tear_down_tc, test_info);
+  return test_info;
+}
+
+#if GTEST_HAS_PARAM_TEST
+void ReportInvalidTestCaseType(const char* test_case_name,
+                               CodeLocation code_location) {
+  Message errors;
+  errors
+      << "Attempted redefinition of test case " << test_case_name << ".\n"
+      << "All tests in the same test case must use the same test fixture\n"
+      << "class.  However, in test case " << test_case_name << ", you tried\n"
+      << "to define a test using a fixture class different from the one\n"
+      << "used earlier. This can happen if the two fixture classes are\n"
+      << "from different namespaces and have the same name. You should\n"
+      << "probably rename one of the classes to put the tests into different\n"
+      << "test cases.";
+
+  fprintf(stderr, "%s %s",
+          FormatFileLocation(code_location.file.c_str(),
+                             code_location.line).c_str(),
+          errors.GetString().c_str());
+}
+#endif  // GTEST_HAS_PARAM_TEST
+
+}  // namespace internal
+
+namespace {
+
+// A predicate that checks the test name of a TestInfo against a known
+// value.
+//
+// This is used for implementation of the TestCase class only.  We put
+// it in the anonymous namespace to prevent polluting the outer
+// namespace.
+//
+// TestNameIs is copyable.
+class TestNameIs {
+ public:
+  // Constructor.
+  //
+  // TestNameIs has NO default constructor.
+  explicit TestNameIs(const char* name)
+      : name_(name) {}
+
+  // Returns true iff the test name of test_info matches name_.
+  bool operator()(const TestInfo * test_info) const {
+    return test_info && test_info->name() == name_;
+  }
+
+ private:
+  std::string name_;
+};
+
+}  // namespace
+
+namespace internal {
+
+// This method expands all parameterized tests registered with macros TEST_P
+// and INSTANTIATE_TEST_CASE_P into regular tests and registers those.
+// This will be done just once during the program runtime.
+void UnitTestImpl::RegisterParameterizedTests() {
+#if GTEST_HAS_PARAM_TEST
+  if (!parameterized_tests_registered_) {
+    parameterized_test_registry_.RegisterTests();
+    parameterized_tests_registered_ = true;
+  }
+#endif
+}
+
+}  // namespace internal
+
+// Creates the test object, runs it, records its result, and then
+// deletes it.
+void TestInfo::Run() {
+  if (!should_run_) return;
+
+  // Tells UnitTest where to store test result.
+  internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
+  impl->set_current_test_info(this);
+
+  TestEventListener* repeater = UnitTest::GetInstance()->listeners().repeater();
+
+  // Notifies the unit test event listeners that a test is about to start.
+  repeater->OnTestStart(*this);
+
+  const TimeInMillis start = internal::GetTimeInMillis();
+
+  impl->os_stack_trace_getter()->UponLeavingGTest();
+
+  // Creates the test object.
+  Test* const test = internal::HandleExceptionsInMethodIfSupported(
+      factory_, &internal::TestFactoryBase::CreateTest,
+      "the test fixture's constructor");
+
+  // Runs the test only if the test object was created and its
+  // constructor didn't generate a fatal failure.
+  if ((test != NULL) && !Test::HasFatalFailure()) {
+    // This doesn't throw as all user code that can throw is wrapped in
+    // exception handling code.
+    test->Run();
+  }
+
+  // Deletes the test object.
+  impl->os_stack_trace_getter()->UponLeavingGTest();
+  internal::HandleExceptionsInMethodIfSupported(
+      test, &Test::DeleteSelf_, "the test fixture's destructor");
+
+  result_.set_elapsed_time(internal::GetTimeInMillis() - start);
+
+  // Notifies the unit test event listener that a test has just finished.
+  repeater->OnTestEnd(*this);
+
+  // Tells UnitTest to stop associating assertion results to this
+  // test.
+  impl->set_current_test_info(NULL);
+}
+
+// class TestCase
+
+// Gets the number of successful tests in this test case.
+int TestCase::successful_test_count() const {
+  return CountIf(test_info_list_, TestPassed);
+}
+
+// Gets the number of failed tests in this test case.
+int TestCase::failed_test_count() const {
+  return CountIf(test_info_list_, TestFailed);
+}
+
+// Gets the number of disabled tests that will be reported in the XML report.
+int TestCase::reportable_disabled_test_count() const {
+  return CountIf(test_info_list_, TestReportableDisabled);
+}
+
+// Gets the number of disabled tests in this test case.
+int TestCase::disabled_test_count() const {
+  return CountIf(test_info_list_, TestDisabled);
+}
+
+// Gets the number of tests to be printed in the XML report.
+int TestCase::reportable_test_count() const {
+  return CountIf(test_info_list_, TestReportable);
+}
+
+// Gets the number of tests in this test case that should run.
+int TestCase::test_to_run_count() const {
+  return CountIf(test_info_list_, ShouldRunTest);
+}
+
+// Gets the number of all tests.
+int TestCase::total_test_count() const {
+  return static_cast<int>(test_info_list_.size());
+}
+
+// Creates a TestCase with the given name.
+//
+// Arguments:
+//
+//   name:         name of the test case
+//   a_type_param: the name of the test case's type parameter, or NULL if
+//                 this is not a typed or a type-parameterized test case.
+//   set_up_tc:    pointer to the function that sets up the test case
+//   tear_down_tc: pointer to the function that tears down the test case
+TestCase::TestCase(const char* a_name, const char* a_type_param,
+                   Test::SetUpTestCaseFunc set_up_tc,
+                   Test::TearDownTestCaseFunc tear_down_tc)
+    : name_(a_name),
+      type_param_(a_type_param ? new std::string(a_type_param) : NULL),
+      set_up_tc_(set_up_tc),
+      tear_down_tc_(tear_down_tc),
+      should_run_(false),
+      elapsed_time_(0) {
+}
+
+// Destructor of TestCase.
+TestCase::~TestCase() {
+  // Deletes every Test in the collection.
+  ForEach(test_info_list_, internal::Delete<TestInfo>);
+}
+
+// Returns the i-th test among all the tests. i can range from 0 to
+// total_test_count() - 1. If i is not in that range, returns NULL.
+const TestInfo* TestCase::GetTestInfo(int i) const {
+  const int index = GetElementOr(test_indices_, i, -1);
+  return index < 0 ? NULL : test_info_list_[index];
+}
+
+// Returns the i-th test among all the tests. i can range from 0 to
+// total_test_count() - 1. If i is not in that range, returns NULL.
+TestInfo* TestCase::GetMutableTestInfo(int i) {
+  const int index = GetElementOr(test_indices_, i, -1);
+  return index < 0 ? NULL : test_info_list_[index];
+}
+
+// Adds a test to this test case.  Will delete the test upon
+// destruction of the TestCase object.
+void TestCase::AddTestInfo(TestInfo * test_info) {
+  test_info_list_.push_back(test_info);
+  test_indices_.push_back(static_cast<int>(test_indices_.size()));
+}
+
+// Runs every test in this TestCase.
+void TestCase::Run() {
+  if (!should_run_) return;
+
+  internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
+  impl->set_current_test_case(this);
+
+  TestEventListener* repeater = UnitTest::GetInstance()->listeners().repeater();
+
+  repeater->OnTestCaseStart(*this);
+  impl->os_stack_trace_getter()->UponLeavingGTest();
+  internal::HandleExceptionsInMethodIfSupported(
+      this, &TestCase::RunSetUpTestCase, "SetUpTestCase()");
+
+  const internal::TimeInMillis start = internal::GetTimeInMillis();
+  for (int i = 0; i < total_test_count(); i++) {
+    GetMutableTestInfo(i)->Run();
+  }
+  elapsed_time_ = internal::GetTimeInMillis() - start;
+
+  impl->os_stack_trace_getter()->UponLeavingGTest();
+  internal::HandleExceptionsInMethodIfSupported(
+      this, &TestCase::RunTearDownTestCase, "TearDownTestCase()");
+
+  repeater->OnTestCaseEnd(*this);
+  impl->set_current_test_case(NULL);
+}
+
+// Clears the results of all tests in this test case.
+void TestCase::ClearResult() {
+  ad_hoc_test_result_.Clear();
+  ForEach(test_info_list_, TestInfo::ClearTestResult);
+}
+
+// Shuffles the tests in this test case.
+void TestCase::ShuffleTests(internal::Random* random) {
+  Shuffle(random, &test_indices_);
+}
+
+// Restores the test order to before the first shuffle.
+void TestCase::UnshuffleTests() {
+  for (size_t i = 0; i < test_indices_.size(); i++) {
+    test_indices_[i] = static_cast<int>(i);
+  }
+}
+
+// Formats a countable noun.  Depending on its quantity, either the
+// singular form or the plural form is used. e.g.
+//
+// FormatCountableNoun(1, "formula", "formulae") returns "1 formula".
+// FormatCountableNoun(5, "book", "books") returns "5 books".
+static std::string FormatCountableNoun(int count,
+                                       const char * singular_form,
+                                       const char * plural_form) {
+  return internal::StreamableToString(count) + " " +
+      (count == 1 ? singular_form : plural_form);
+}
+
+// Formats the count of tests.
+static std::string FormatTestCount(int test_count) {
+  return FormatCountableNoun(test_count, "test", "tests");
+}
+
+// Formats the count of test cases.
+static std::string FormatTestCaseCount(int test_case_count) {
+  return FormatCountableNoun(test_case_count, "test case", "test cases");
+}
+
+// Converts a TestPartResult::Type enum to human-friendly string
+// representation.  Both kNonFatalFailure and kFatalFailure are translated
+// to "Failure", as the user usually doesn't care about the difference
+// between the two when viewing the test result.
+static const char * TestPartResultTypeToString(TestPartResult::Type type) {
+  switch (type) {
+    case TestPartResult::kSuccess:
+      return "Success";
+
+    case TestPartResult::kNonFatalFailure:
+    case TestPartResult::kFatalFailure:
+#ifdef _MSC_VER
+      return "error: ";
+#else
+      return "Failure\n";
+#endif
+    default:
+      return "Unknown result type";
+  }
+}
+
+namespace internal {
+
+// Prints a TestPartResult to an std::string.
+static std::string PrintTestPartResultToString(
+    const TestPartResult& test_part_result) {
+  return (Message()
+          << internal::FormatFileLocation(test_part_result.file_name(),
+                                          test_part_result.line_number())
+          << " " << TestPartResultTypeToString(test_part_result.type())
+          << test_part_result.message()).GetString();
+}
+
+// Prints a TestPartResult.
+static void PrintTestPartResult(const TestPartResult& test_part_result) {
+  const std::string& result =
+      PrintTestPartResultToString(test_part_result);
+  printf("%s\n", result.c_str());
+  fflush(stdout);
+  // If the test program runs in Visual Studio or a debugger, the
+  // following statements add the test part result message to the Output
+  // window such that the user can double-click on it to jump to the
+  // corresponding source code location; otherwise they do nothing.
+#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE
+  // We don't call OutputDebugString*() on Windows Mobile, as printing
+  // to stdout is done by OutputDebugString() there already - we don't
+  // want the same message printed twice.
+  ::OutputDebugStringA(result.c_str());
+  ::OutputDebugStringA("\n");
+#endif
+}
+
+// class PrettyUnitTestResultPrinter
+
+enum GTestColor {
+  COLOR_DEFAULT,
+  COLOR_RED,
+  COLOR_GREEN,
+  COLOR_YELLOW
+};
+
+#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE && \
+    !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT
+
+// Returns the character attribute for the given color.
+WORD GetColorAttribute(GTestColor color) {
+  switch (color) {
+    case COLOR_RED:    return FOREGROUND_RED;
+    case COLOR_GREEN:  return FOREGROUND_GREEN;
+    case COLOR_YELLOW: return FOREGROUND_RED | FOREGROUND_GREEN;
+    default:           return 0;
+  }
+}
+
+#else
+
+// Returns the ANSI color code for the given color.  COLOR_DEFAULT is
+// an invalid input.
+const char* GetAnsiColorCode(GTestColor color) {
+  switch (color) {
+    case COLOR_RED:     return "1";
+    case COLOR_GREEN:   return "2";
+    case COLOR_YELLOW:  return "3";
+    default:            return NULL;
+  }
+}
+
+#endif  // GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE
+
+// Returns true iff Google Test should use colors in the output.
+bool ShouldUseColor(bool stdout_is_tty) {
+  const char* const gtest_color = GTEST_FLAG(color).c_str();
+
+  if (String::CaseInsensitiveCStringEquals(gtest_color, "auto")) {
+#if GTEST_OS_WINDOWS
+    // On Windows the TERM variable is usually not set, but the
+    // console there does support colors.
+    return stdout_is_tty;
+#else
+    // On non-Windows platforms, we rely on the TERM variable.
+    const char* const term = posix::GetEnv("TERM");
+    const bool term_supports_color =
+        String::CStringEquals(term, "xterm") ||
+        String::CStringEquals(term, "xterm-color") ||
+        String::CStringEquals(term, "xterm-256color") ||
+        String::CStringEquals(term, "screen") ||
+        String::CStringEquals(term, "screen-256color") ||
+        String::CStringEquals(term, "tmux") ||
+        String::CStringEquals(term, "tmux-256color") ||
+        String::CStringEquals(term, "rxvt-unicode") ||
+        String::CStringEquals(term, "rxvt-unicode-256color") ||
+        String::CStringEquals(term, "linux") ||
+        String::CStringEquals(term, "cygwin");
+    return stdout_is_tty && term_supports_color;
+#endif  // GTEST_OS_WINDOWS
+  }
+
+  return String::CaseInsensitiveCStringEquals(gtest_color, "yes") ||
+      String::CaseInsensitiveCStringEquals(gtest_color, "true") ||
+      String::CaseInsensitiveCStringEquals(gtest_color, "t") ||
+      String::CStringEquals(gtest_color, "1");
+  // We take "yes", "true", "t", and "1" as meaning "yes".  If the
+  // value is neither one of these nor "auto", we treat it as "no" to
+  // be conservative.
+}
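+
+// For reference: the flag consulted above is normally set on the command
+// line, e.g.
+//
+//   ./my_test --gtest_color=yes   // force colored output
+//   ./my_test --gtest_color=no    // never use colors
+//   ./my_test --gtest_color=auto  // the default; use the TTY/TERM checks above
+//
+// or via the GTEST_COLOR environment variable with the same values.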
+
+// Helpers for printing colored strings to stdout. Note that on Windows, we
+// cannot simply emit special characters and have the terminal change colors.
+// This routine must actually emit the characters rather than return a string
+// that would be colored when printed, as can be done on Linux.
+void ColoredPrintf(GTestColor color, const char* fmt, ...) {
+  va_list args;
+  va_start(args, fmt);
+
+#if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN || GTEST_OS_ZOS || \
+    GTEST_OS_IOS || GTEST_OS_WINDOWS_PHONE || GTEST_OS_WINDOWS_RT
+  const bool use_color = AlwaysFalse();
+#else
+  static const bool in_color_mode =
+      ShouldUseColor(posix::IsATTY(posix::FileNo(stdout)) != 0);
+  const bool use_color = in_color_mode && (color != COLOR_DEFAULT);
+#endif  // GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN || GTEST_OS_ZOS
+  // The '!= 0' comparison is necessary to satisfy MSVC 7.1.
+
+  if (!use_color) {
+    vprintf(fmt, args);
+    va_end(args);
+    return;
+  }
+
+#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE && \
+    !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT
+  const HANDLE stdout_handle = GetStdHandle(STD_OUTPUT_HANDLE);
+
+  // Gets the current text color.
+  CONSOLE_SCREEN_BUFFER_INFO buffer_info;
+  GetConsoleScreenBufferInfo(stdout_handle, &buffer_info);
+  const WORD old_color_attrs = buffer_info.wAttributes;
+
+  // We need to flush the stream buffers into the console before each
+  // SetConsoleTextAttribute call lest it affect the text that is already
+  // printed but has not yet reached the console.
+  fflush(stdout);
+  SetConsoleTextAttribute(stdout_handle,
+                          GetColorAttribute(color) | FOREGROUND_INTENSITY);
+  vprintf(fmt, args);
+
+  fflush(stdout);
+  // Restores the text color.
+  SetConsoleTextAttribute(stdout_handle, old_color_attrs);
+#else
+  printf("\033[0;3%sm", GetAnsiColorCode(color));
+  vprintf(fmt, args);
+  printf("\033[m");  // Resets the terminal to default.
+#endif  // GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE
+  va_end(args);
+}
+
+// Text printed in Google Test's text output and --gtest_list_tests
+// output to label the type parameter and value parameter for a test.
+static const char kTypeParamLabel[] = "TypeParam";
+static const char kValueParamLabel[] = "GetParam()";
+
+void PrintFullTestCommentIfPresent(const TestInfo& test_info) {
+  const char* const type_param = test_info.type_param();
+  const char* const value_param = test_info.value_param();
+
+  if (type_param != NULL || value_param != NULL) {
+    printf(", where ");
+    if (type_param != NULL) {
+      printf("%s = %s", kTypeParamLabel, type_param);
+      if (value_param != NULL)
+        printf(" and ");
+    }
+    if (value_param != NULL) {
+      printf("%s = %s", kValueParamLabel, value_param);
+    }
+  }
+}
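+
+// For example (illustrative): for a value-parameterized test the code above
+// appends something like
+//
+//   , where GetParam() = 3
+//
+// and for a typed or type-parameterized test something like
+//
+//   , where TypeParam = int
+//
+// to the test name that has already been printed on the current line.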
+
+// This class implements the TestEventListener interface.
+//
+// Class PrettyUnitTestResultPrinter is copyable.
+class PrettyUnitTestResultPrinter : public TestEventListener {
+ public:
+  PrettyUnitTestResultPrinter() {}
+  static void PrintTestName(const char * test_case, const char * test) {
+    printf("%s.%s", test_case, test);
+  }
+
+  // The following methods override what's in the TestEventListener class.
+  virtual void OnTestProgramStart(const UnitTest& /*unit_test*/) {}
+  virtual void OnTestIterationStart(const UnitTest& unit_test, int iteration);
+  virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test);
+  virtual void OnEnvironmentsSetUpEnd(const UnitTest& /*unit_test*/) {}
+  virtual void OnTestCaseStart(const TestCase& test_case);
+  virtual void OnTestStart(const TestInfo& test_info);
+  virtual void OnTestPartResult(const TestPartResult& result);
+  virtual void OnTestEnd(const TestInfo& test_info);
+  virtual void OnTestCaseEnd(const TestCase& test_case);
+  virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test);
+  virtual void OnEnvironmentsTearDownEnd(const UnitTest& /*unit_test*/) {}
+  virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration);
+  virtual void OnTestProgramEnd(const UnitTest& /*unit_test*/) {}
+
+ private:
+  static void PrintFailedTests(const UnitTest& unit_test);
+};
+
+// Fired before each iteration of tests starts.
+void PrettyUnitTestResultPrinter::OnTestIterationStart(
+    const UnitTest& unit_test, int iteration) {
+  if (GTEST_FLAG(repeat) != 1)
+    printf("\nRepeating all tests (iteration %d) . . .\n\n", iteration + 1);
+
+  const char* const filter = GTEST_FLAG(filter).c_str();
+
+  // Prints the filter if it's not *.  This reminds the user that some
+  // tests may be skipped.
+  if (!String::CStringEquals(filter, kUniversalFilter)) {
+    ColoredPrintf(COLOR_YELLOW,
+                  "Note: %s filter = %s\n", GTEST_NAME_, filter);
+  }
+
+  if (internal::ShouldShard(kTestTotalShards, kTestShardIndex, false)) {
+    const Int32 shard_index = Int32FromEnvOrDie(kTestShardIndex, -1);
+    ColoredPrintf(COLOR_YELLOW,
+                  "Note: This is test shard %d of %s.\n",
+                  static_cast<int>(shard_index) + 1,
+                  internal::posix::GetEnv(kTestTotalShards));
+  }
+
+  if (GTEST_FLAG(shuffle)) {
+    ColoredPrintf(COLOR_YELLOW,
+                  "Note: Randomizing tests' orders with a seed of %d .\n",
+                  unit_test.random_seed());
+  }
+
+  ColoredPrintf(COLOR_GREEN,  "[==========] ");
+  printf("Running %s from %s.\n",
+         FormatTestCount(unit_test.test_to_run_count()).c_str(),
+         FormatTestCaseCount(unit_test.test_case_to_run_count()).c_str());
+  fflush(stdout);
+}
+
+void PrettyUnitTestResultPrinter::OnEnvironmentsSetUpStart(
+    const UnitTest& /*unit_test*/) {
+  ColoredPrintf(COLOR_GREEN,  "[----------] ");
+  printf("Global test environment set-up.\n");
+  fflush(stdout);
+}
+
+void PrettyUnitTestResultPrinter::OnTestCaseStart(const TestCase& test_case) {
+  const std::string counts =
+      FormatCountableNoun(test_case.test_to_run_count(), "test", "tests");
+  ColoredPrintf(COLOR_GREEN, "[----------] ");
+  printf("%s from %s", counts.c_str(), test_case.name());
+  if (test_case.type_param() == NULL) {
+    printf("\n");
+  } else {
+    printf(", where %s = %s\n", kTypeParamLabel, test_case.type_param());
+  }
+  fflush(stdout);
+}
+
+void PrettyUnitTestResultPrinter::OnTestStart(const TestInfo& test_info) {
+  ColoredPrintf(COLOR_GREEN,  "[ RUN      ] ");
+  PrintTestName(test_info.test_case_name(), test_info.name());
+  printf("\n");
+  fflush(stdout);
+}
+
+// Called after an assertion failure.
+void PrettyUnitTestResultPrinter::OnTestPartResult(
+    const TestPartResult& result) {
+  // If the test part succeeded, we don't need to do anything.
+  if (result.type() == TestPartResult::kSuccess)
+    return;
+
+  // Print failure message from the assertion (e.g. expected this and got that).
+  PrintTestPartResult(result);
+  fflush(stdout);
+}
+
+void PrettyUnitTestResultPrinter::OnTestEnd(const TestInfo& test_info) {
+  if (test_info.result()->Passed()) {
+    ColoredPrintf(COLOR_GREEN, "[       OK ] ");
+  } else {
+    ColoredPrintf(COLOR_RED, "[  FAILED  ] ");
+  }
+  PrintTestName(test_info.test_case_name(), test_info.name());
+  if (test_info.result()->Failed())
+    PrintFullTestCommentIfPresent(test_info);
+
+  if (GTEST_FLAG(print_time)) {
+    printf(" (%s ms)\n", internal::StreamableToString(
+           test_info.result()->elapsed_time()).c_str());
+  } else {
+    printf("\n");
+  }
+  fflush(stdout);
+}
+
+void PrettyUnitTestResultPrinter::OnTestCaseEnd(const TestCase& test_case) {
+  if (!GTEST_FLAG(print_time)) return;
+
+  const std::string counts =
+      FormatCountableNoun(test_case.test_to_run_count(), "test", "tests");
+  ColoredPrintf(COLOR_GREEN, "[----------] ");
+  printf("%s from %s (%s ms total)\n\n",
+         counts.c_str(), test_case.name(),
+         internal::StreamableToString(test_case.elapsed_time()).c_str());
+  fflush(stdout);
+}
+
+void PrettyUnitTestResultPrinter::OnEnvironmentsTearDownStart(
+    const UnitTest& /*unit_test*/) {
+  ColoredPrintf(COLOR_GREEN,  "[----------] ");
+  printf("Global test environment tear-down\n");
+  fflush(stdout);
+}
+
+// Internal helper for printing the list of failed tests.
+void PrettyUnitTestResultPrinter::PrintFailedTests(const UnitTest& unit_test) {
+  const int failed_test_count = unit_test.failed_test_count();
+  if (failed_test_count == 0) {
+    return;
+  }
+
+  for (int i = 0; i < unit_test.total_test_case_count(); ++i) {
+    const TestCase& test_case = *unit_test.GetTestCase(i);
+    if (!test_case.should_run() || (test_case.failed_test_count() == 0)) {
+      continue;
+    }
+    for (int j = 0; j < test_case.total_test_count(); ++j) {
+      const TestInfo& test_info = *test_case.GetTestInfo(j);
+      if (!test_info.should_run() || test_info.result()->Passed()) {
+        continue;
+      }
+      ColoredPrintf(COLOR_RED, "[  FAILED  ] ");
+      printf("%s.%s", test_case.name(), test_info.name());
+      PrintFullTestCommentIfPresent(test_info);
+      printf("\n");
+    }
+  }
+}
+
+void PrettyUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test,
+                                                     int /*iteration*/) {
+  ColoredPrintf(COLOR_GREEN,  "[==========] ");
+  printf("%s from %s ran.",
+         FormatTestCount(unit_test.test_to_run_count()).c_str(),
+         FormatTestCaseCount(unit_test.test_case_to_run_count()).c_str());
+  if (GTEST_FLAG(print_time)) {
+    printf(" (%s ms total)",
+           internal::StreamableToString(unit_test.elapsed_time()).c_str());
+  }
+  printf("\n");
+  ColoredPrintf(COLOR_GREEN,  "[  PASSED  ] ");
+  printf("%s.\n", FormatTestCount(unit_test.successful_test_count()).c_str());
+
+  int num_failures = unit_test.failed_test_count();
+  if (!unit_test.Passed()) {
+    const int failed_test_count = unit_test.failed_test_count();
+    ColoredPrintf(COLOR_RED,  "[  FAILED  ] ");
+    printf("%s, listed below:\n", FormatTestCount(failed_test_count).c_str());
+    PrintFailedTests(unit_test);
+    printf("\n%2d FAILED %s\n", num_failures,
+           num_failures == 1 ? "TEST" : "TESTS");
+  }
+
+  int num_disabled = unit_test.reportable_disabled_test_count();
+  if (num_disabled && !GTEST_FLAG(also_run_disabled_tests)) {
+    if (!num_failures) {
+      printf("\n");  // Add a spacer if no FAILURE banner is displayed.
+    }
+    ColoredPrintf(COLOR_YELLOW,
+                  "  YOU HAVE %d DISABLED %s\n\n",
+                  num_disabled,
+                  num_disabled == 1 ? "TEST" : "TESTS");
+  }
+  // Ensure that Google Test output is printed before, e.g., heapchecker output.
+  fflush(stdout);
+}
+
+// End PrettyUnitTestResultPrinter
+
+// class TestEventRepeater
+//
+// This class forwards events to other event listeners.
+class TestEventRepeater : public TestEventListener {
+ public:
+  TestEventRepeater() : forwarding_enabled_(true) {}
+  virtual ~TestEventRepeater();
+  void Append(TestEventListener *listener);
+  TestEventListener* Release(TestEventListener* listener);
+
+  // Controls whether events will be forwarded to listeners_. Set to false
+  // in death test child processes.
+  bool forwarding_enabled() const { return forwarding_enabled_; }
+  void set_forwarding_enabled(bool enable) { forwarding_enabled_ = enable; }
+
+  virtual void OnTestProgramStart(const UnitTest& unit_test);
+  virtual void OnTestIterationStart(const UnitTest& unit_test, int iteration);
+  virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test);
+  virtual void OnEnvironmentsSetUpEnd(const UnitTest& unit_test);
+  virtual void OnTestCaseStart(const TestCase& test_case);
+  virtual void OnTestStart(const TestInfo& test_info);
+  virtual void OnTestPartResult(const TestPartResult& result);
+  virtual void OnTestEnd(const TestInfo& test_info);
+  virtual void OnTestCaseEnd(const TestCase& test_case);
+  virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test);
+  virtual void OnEnvironmentsTearDownEnd(const UnitTest& unit_test);
+  virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration);
+  virtual void OnTestProgramEnd(const UnitTest& unit_test);
+
+ private:
+  // Controls whether events will be forwarded to listeners_. Set to false
+  // in death test child processes.
+  bool forwarding_enabled_;
+  // The list of listeners that receive events.
+  std::vector<TestEventListener*> listeners_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(TestEventRepeater);
+};
+
+TestEventRepeater::~TestEventRepeater() {
+  ForEach(listeners_, Delete<TestEventListener>);
+}
+
+void TestEventRepeater::Append(TestEventListener *listener) {
+  listeners_.push_back(listener);
+}
+
+// TODO(vladl@google.com): Factor the search functionality into Vector::Find.
+TestEventListener* TestEventRepeater::Release(TestEventListener *listener) {
+  for (size_t i = 0; i < listeners_.size(); ++i) {
+    if (listeners_[i] == listener) {
+      listeners_.erase(listeners_.begin() + i);
+      return listener;
+    }
+  }
+
+  return NULL;
+}
+
+// Since most methods are very similar, use macros to reduce boilerplate.
+// This defines a member that forwards the call to all listeners.
+#define GTEST_REPEATER_METHOD_(Name, Type) \
+void TestEventRepeater::Name(const Type& parameter) { \
+  if (forwarding_enabled_) { \
+    for (size_t i = 0; i < listeners_.size(); i++) { \
+      listeners_[i]->Name(parameter); \
+    } \
+  } \
+}
+// This defines a member that forwards the call to all listeners in reverse
+// order.
+#define GTEST_REVERSE_REPEATER_METHOD_(Name, Type) \
+void TestEventRepeater::Name(const Type& parameter) { \
+  if (forwarding_enabled_) { \
+    for (int i = static_cast<int>(listeners_.size()) - 1; i >= 0; i--) { \
+      listeners_[i]->Name(parameter); \
+    } \
+  } \
+}
+
+GTEST_REPEATER_METHOD_(OnTestProgramStart, UnitTest)
+GTEST_REPEATER_METHOD_(OnEnvironmentsSetUpStart, UnitTest)
+GTEST_REPEATER_METHOD_(OnTestCaseStart, TestCase)
+GTEST_REPEATER_METHOD_(OnTestStart, TestInfo)
+GTEST_REPEATER_METHOD_(OnTestPartResult, TestPartResult)
+GTEST_REPEATER_METHOD_(OnEnvironmentsTearDownStart, UnitTest)
+GTEST_REVERSE_REPEATER_METHOD_(OnEnvironmentsSetUpEnd, UnitTest)
+GTEST_REVERSE_REPEATER_METHOD_(OnEnvironmentsTearDownEnd, UnitTest)
+GTEST_REVERSE_REPEATER_METHOD_(OnTestEnd, TestInfo)
+GTEST_REVERSE_REPEATER_METHOD_(OnTestCaseEnd, TestCase)
+GTEST_REVERSE_REPEATER_METHOD_(OnTestProgramEnd, UnitTest)
+
+#undef GTEST_REPEATER_METHOD_
+#undef GTEST_REVERSE_REPEATER_METHOD_
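+
+// For clarity (an illustrative sketch of what the macros above generate), the
+// expansion of GTEST_REPEATER_METHOD_(OnTestStart, TestInfo) is equivalent to:
+//
+//   void TestEventRepeater::OnTestStart(const TestInfo& parameter) {
+//     if (forwarding_enabled_) {
+//       for (size_t i = 0; i < listeners_.size(); i++) {
+//         listeners_[i]->OnTestStart(parameter);
+//       }
+//     }
+//   }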
+
+void TestEventRepeater::OnTestIterationStart(const UnitTest& unit_test,
+                                             int iteration) {
+  if (forwarding_enabled_) {
+    for (size_t i = 0; i < listeners_.size(); i++) {
+      listeners_[i]->OnTestIterationStart(unit_test, iteration);
+    }
+  }
+}
+
+void TestEventRepeater::OnTestIterationEnd(const UnitTest& unit_test,
+                                           int iteration) {
+  if (forwarding_enabled_) {
+    for (int i = static_cast<int>(listeners_.size()) - 1; i >= 0; i--) {
+      listeners_[i]->OnTestIterationEnd(unit_test, iteration);
+    }
+  }
+}
+
+// End TestEventRepeater
+
+// This class generates an XML output file.
+class XmlUnitTestResultPrinter : public EmptyTestEventListener {
+ public:
+  explicit XmlUnitTestResultPrinter(const char* output_file);
+
+  virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration);
+
+ private:
+  // Is c a whitespace character that is normalized to a space character
+  // when it appears in an XML attribute value?
+  static bool IsNormalizableWhitespace(char c) {
+    return c == 0x9 || c == 0xA || c == 0xD;
+  }
+
+  // May c appear in a well-formed XML document?
+  static bool IsValidXmlCharacter(char c) {
+    return IsNormalizableWhitespace(c) || c >= 0x20;
+  }
+
+  // Returns an XML-escaped copy of the input string str.  If
+  // is_attribute is true, the text is meant to appear as an attribute
+  // value, and normalizable whitespace is preserved by replacing it
+  // with character references.
+  static std::string EscapeXml(const std::string& str, bool is_attribute);
+
+  // Returns the given string with all characters invalid in XML removed.
+  static std::string RemoveInvalidXmlCharacters(const std::string& str);
+
+  // Convenience wrapper around EscapeXml when str is an attribute value.
+  static std::string EscapeXmlAttribute(const std::string& str) {
+    return EscapeXml(str, true);
+  }
+
+  // Convenience wrapper around EscapeXml when str is not an attribute value.
+  static std::string EscapeXmlText(const char* str) {
+    return EscapeXml(str, false);
+  }
+
+  // Verifies that the given attribute belongs to the given element and
+  // streams the attribute as XML.
+  static void OutputXmlAttribute(std::ostream* stream,
+                                 const std::string& element_name,
+                                 const std::string& name,
+                                 const std::string& value);
+
+  // Streams an XML CDATA section, escaping invalid CDATA sequences as needed.
+  static void OutputXmlCDataSection(::std::ostream* stream, const char* data);
+
+  // Streams an XML representation of a TestInfo object.
+  static void OutputXmlTestInfo(::std::ostream* stream,
+                                const char* test_case_name,
+                                const TestInfo& test_info);
+
+  // Prints an XML representation of a TestCase object
+  static void PrintXmlTestCase(::std::ostream* stream,
+                               const TestCase& test_case);
+
+  // Prints an XML summary of unit_test to the given output stream.
+  static void PrintXmlUnitTest(::std::ostream* stream,
+                               const UnitTest& unit_test);
+
+  // Produces a string representing the test properties in a result as space
+  // delimited XML attributes based on the property key="value" pairs.
+  // When the std::string is not empty, it includes a space at the beginning,
+  // to delimit this attribute from prior attributes.
+  static std::string TestPropertiesAsXmlAttributes(const TestResult& result);
+
+  // The output file.
+  const std::string output_file_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(XmlUnitTestResultPrinter);
+};
+
+// Creates a new XmlUnitTestResultPrinter.
+XmlUnitTestResultPrinter::XmlUnitTestResultPrinter(const char* output_file)
+    : output_file_(output_file) {
+  if (output_file_.c_str() == NULL || output_file_.empty()) {
+    fprintf(stderr, "XML output file may not be null\n");
+    fflush(stderr);
+    exit(EXIT_FAILURE);
+  }
+}
+
+// Called after the unit test ends.
+void XmlUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test,
+                                                  int /*iteration*/) {
+  FILE* xmlout = NULL;
+  FilePath output_file(output_file_);
+  FilePath output_dir(output_file.RemoveFileName());
+
+  if (output_dir.CreateDirectoriesRecursively()) {
+    xmlout = posix::FOpen(output_file_.c_str(), "w");
+  }
+  if (xmlout == NULL) {
+    // TODO(wan): report the reason of the failure.
+    //
+    // We don't do it for now as:
+    //
+    //   1. There is no urgent need for it.
+    //   2. It's a bit involved to make the errno variable thread-safe on
+    //      all three operating systems (Linux, Windows, and Mac OS).
+    //   3. To interpret the meaning of errno in a thread-safe way,
+    //      we need the strerror_r() function, which is not available on
+    //      Windows.
+    fprintf(stderr,
+            "Unable to open file \"%s\"\n",
+            output_file_.c_str());
+    fflush(stderr);
+    exit(EXIT_FAILURE);
+  }
+  std::stringstream stream;
+  PrintXmlUnitTest(&stream, unit_test);
+  fprintf(xmlout, "%s", StringStreamToString(&stream).c_str());
+  fclose(xmlout);
+}
+
+// Returns an XML-escaped copy of the input string str.  If is_attribute
+// is true, the text is meant to appear as an attribute value, and
+// normalizable whitespace is preserved by replacing it with character
+// references.
+//
+// Invalid XML characters in str, if any, are stripped from the output.
+// It is expected that most, if not all, of the text processed by this
+// module will consist of ordinary English text.
+// If this module is ever modified to produce version 1.1 XML output,
+// most invalid characters can be retained using character references.
+// TODO(wan): It might be nice to have a minimally invasive, human-readable
+// escaping scheme for invalid characters, rather than dropping them.
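+//
+// For example (illustrative): with is_attribute == true the input
+//
+//   say "x < y & z"
+//
+// is escaped to
+//
+//   say &quot;x &lt; y &amp; z&quot;
+//
+// whereas with is_attribute == false the double quotes are left untouched.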
+std::string XmlUnitTestResultPrinter::EscapeXml(
+    const std::string& str, bool is_attribute) {
+  Message m;
+
+  for (size_t i = 0; i < str.size(); ++i) {
+    const char ch = str[i];
+    switch (ch) {
+      case '<':
+        m << "&lt;";
+        break;
+      case '>':
+        m << "&gt;";
+        break;
+      case '&':
+        m << "&amp;";
+        break;
+      case '\'':
+        if (is_attribute)
+          m << "&apos;";
+        else
+          m << '\'';
+        break;
+      case '"':
+        if (is_attribute)
+          m << "&quot;";
+        else
+          m << '"';
+        break;
+      default:
+        if (IsValidXmlCharacter(ch)) {
+          if (is_attribute && IsNormalizableWhitespace(ch))
+            m << "&#x" << String::FormatByte(static_cast<unsigned char>(ch))
+              << ";";
+          else
+            m << ch;
+        }
+        break;
+    }
+  }
+
+  return m.GetString();
+}
+
+// Returns the given string with all characters invalid in XML removed.
+// Currently invalid characters are dropped from the string. An
+// alternative is to replace them with certain characters such as . or ?.
+std::string XmlUnitTestResultPrinter::RemoveInvalidXmlCharacters(
+    const std::string& str) {
+  std::string output;
+  output.reserve(str.size());
+  for (std::string::const_iterator it = str.begin(); it != str.end(); ++it)
+    if (IsValidXmlCharacter(*it))
+      output.push_back(*it);
+
+  return output;
+}
+
+// The following routines generate an XML representation of a UnitTest
+// object.
+//
+// This is how Google Test concepts map to the DTD:
+//
+// <testsuites name="AllTests">        <-- corresponds to a UnitTest object
+//   <testsuite name="testcase-name">  <-- corresponds to a TestCase object
+//     <testcase name="test-name">     <-- corresponds to a TestInfo object
+//       <failure message="...">...</failure>
+//       <failure message="...">...</failure>
+//       <failure message="...">...</failure>
+//                                     <-- individual assertion failures
+//     </testcase>
+//   </testsuite>
+// </testsuites>
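+//
+// For example (an illustrative sketch only; the exact attributes and their
+// values depend on the run), a program with one failing test might produce:
+//
+//   <?xml version="1.0" encoding="UTF-8"?>
+//   <testsuites tests="1" failures="1" errors="0" name="AllTests">
+//     <testsuite name="FooTest" tests="1" failures="1" errors="0">
+//       <testcase name="Bar" status="run" classname="FooTest">
+//         <failure message="..." type=""><![CDATA[...]]></failure>
+//       </testcase>
+//     </testsuite>
+//   </testsuites>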
+
+// Formats the given time in milliseconds as seconds.
+std::string FormatTimeInMillisAsSeconds(TimeInMillis ms) {
+  ::std::stringstream ss;
+  ss << (static_cast<double>(ms) * 1e-3);
+  return ss.str();
+}
+
+static bool PortableLocaltime(time_t seconds, struct tm* out) {
+#if defined(_MSC_VER)
+  return localtime_s(out, &seconds) == 0;
+#elif defined(__MINGW32__) || defined(__MINGW64__)
+  // MINGW <time.h> provides neither localtime_r nor localtime_s, but uses
+  // Windows' localtime(), which has a thread-local tm buffer.
+  struct tm* tm_ptr = localtime(&seconds);  // NOLINT
+  if (tm_ptr == NULL)
+    return false;
+  *out = *tm_ptr;
+  return true;
+#else
+  return localtime_r(&seconds, out) != NULL;
+#endif
+}
+
+// Converts the given epoch time in milliseconds to a date string in the ISO
+// 8601 format, without the timezone information.
+std::string FormatEpochTimeInMillisAsIso8601(TimeInMillis ms) {
+  struct tm time_struct;
+  if (!PortableLocaltime(static_cast<time_t>(ms / 1000), &time_struct))
+    return "";
+  // YYYY-MM-DDThh:mm:ss
+  return StreamableToString(time_struct.tm_year + 1900) + "-" +
+      String::FormatIntWidth2(time_struct.tm_mon + 1) + "-" +
+      String::FormatIntWidth2(time_struct.tm_mday) + "T" +
+      String::FormatIntWidth2(time_struct.tm_hour) + ":" +
+      String::FormatIntWidth2(time_struct.tm_min) + ":" +
+      String::FormatIntWidth2(time_struct.tm_sec);
+}
+
+// Streams an XML CDATA section, escaping invalid CDATA sequences as needed.
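+// For example (illustrative), the input  foo]]>bar  is written out as
+//
+//   <![CDATA[foo]]>]]&gt;<![CDATA[bar]]>
+//
+// so that a literal "]]>" in the data cannot terminate the section early.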
+void XmlUnitTestResultPrinter::OutputXmlCDataSection(::std::ostream* stream,
+                                                     const char* data) {
+  const char* segment = data;
+  *stream << "<![CDATA[";
+  for (;;) {
+    const char* const next_segment = strstr(segment, "]]>");
+    if (next_segment != NULL) {
+      stream->write(
+          segment, static_cast<std::streamsize>(next_segment - segment));
+      *stream << "]]>]]&gt;<![CDATA[";
+      segment = next_segment + strlen("]]>");
+    } else {
+      *stream << segment;
+      break;
+    }
+  }
+  *stream << "]]>";
+}
+
+void XmlUnitTestResultPrinter::OutputXmlAttribute(
+    std::ostream* stream,
+    const std::string& element_name,
+    const std::string& name,
+    const std::string& value) {
+  const std::vector<std::string>& allowed_names =
+      GetReservedAttributesForElement(element_name);
+
+  GTEST_CHECK_(std::find(allowed_names.begin(), allowed_names.end(), name) !=
+                   allowed_names.end())
+      << "Attribute " << name << " is not allowed for element <" << element_name
+      << ">.";
+
+  *stream << " " << name << "=\"" << EscapeXmlAttribute(value) << "\"";
+}
+
+// Prints an XML representation of a TestInfo object.
+// TODO(wan): There is also value in printing properties with the plain printer.
+void XmlUnitTestResultPrinter::OutputXmlTestInfo(::std::ostream* stream,
+                                                 const char* test_case_name,
+                                                 const TestInfo& test_info) {
+  const TestResult& result = *test_info.result();
+  const std::string kTestcase = "testcase";
+
+  *stream << "    <testcase";
+  OutputXmlAttribute(stream, kTestcase, "name", test_info.name());
+
+  if (test_info.value_param() != NULL) {
+    OutputXmlAttribute(stream, kTestcase, "value_param",
+                       test_info.value_param());
+  }
+  if (test_info.type_param() != NULL) {
+    OutputXmlAttribute(stream, kTestcase, "type_param", test_info.type_param());
+  }
+
+  OutputXmlAttribute(stream, kTestcase, "status",
+                     test_info.should_run() ? "run" : "notrun");
+  OutputXmlAttribute(stream, kTestcase, "time",
+                     FormatTimeInMillisAsSeconds(result.elapsed_time()));
+  OutputXmlAttribute(stream, kTestcase, "classname", test_case_name);
+  *stream << TestPropertiesAsXmlAttributes(result);
+
+  int failures = 0;
+  for (int i = 0; i < result.total_part_count(); ++i) {
+    const TestPartResult& part = result.GetTestPartResult(i);
+    if (part.failed()) {
+      if (++failures == 1) {
+        *stream << ">\n";
+      }
+      const string location = internal::FormatCompilerIndependentFileLocation(
+          part.file_name(), part.line_number());
+      const string summary = location + "\n" + part.summary();
+      *stream << "      <failure message=\""
+              << EscapeXmlAttribute(summary.c_str())
+              << "\" type=\"\">";
+      const string detail = location + "\n" + part.message();
+      OutputXmlCDataSection(stream, RemoveInvalidXmlCharacters(detail).c_str());
+      *stream << "</failure>\n";
+    }
+  }
+
+  if (failures == 0)
+    *stream << " />\n";
+  else
+    *stream << "    </testcase>\n";
+}
+
+// Prints an XML representation of a TestCase object
+void XmlUnitTestResultPrinter::PrintXmlTestCase(std::ostream* stream,
+                                                const TestCase& test_case) {
+  const std::string kTestsuite = "testsuite";
+  *stream << "  <" << kTestsuite;
+  OutputXmlAttribute(stream, kTestsuite, "name", test_case.name());
+  OutputXmlAttribute(stream, kTestsuite, "tests",
+                     StreamableToString(test_case.reportable_test_count()));
+  OutputXmlAttribute(stream, kTestsuite, "failures",
+                     StreamableToString(test_case.failed_test_count()));
+  OutputXmlAttribute(
+      stream, kTestsuite, "disabled",
+      StreamableToString(test_case.reportable_disabled_test_count()));
+  OutputXmlAttribute(stream, kTestsuite, "errors", "0");
+  OutputXmlAttribute(stream, kTestsuite, "time",
+                     FormatTimeInMillisAsSeconds(test_case.elapsed_time()));
+  *stream << TestPropertiesAsXmlAttributes(test_case.ad_hoc_test_result())
+          << ">\n";
+
+  for (int i = 0; i < test_case.total_test_count(); ++i) {
+    if (test_case.GetTestInfo(i)->is_reportable())
+      OutputXmlTestInfo(stream, test_case.name(), *test_case.GetTestInfo(i));
+  }
+  *stream << "  </" << kTestsuite << ">\n";
+}
+
+// Prints an XML summary of unit_test to the given output stream.
+void XmlUnitTestResultPrinter::PrintXmlUnitTest(std::ostream* stream,
+                                                const UnitTest& unit_test) {
+  const std::string kTestsuites = "testsuites";
+
+  *stream << "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n";
+  *stream << "<" << kTestsuites;
+
+  OutputXmlAttribute(stream, kTestsuites, "tests",
+                     StreamableToString(unit_test.reportable_test_count()));
+  OutputXmlAttribute(stream, kTestsuites, "failures",
+                     StreamableToString(unit_test.failed_test_count()));
+  OutputXmlAttribute(
+      stream, kTestsuites, "disabled",
+      StreamableToString(unit_test.reportable_disabled_test_count()));
+  OutputXmlAttribute(stream, kTestsuites, "errors", "0");
+  OutputXmlAttribute(
+      stream, kTestsuites, "timestamp",
+      FormatEpochTimeInMillisAsIso8601(unit_test.start_timestamp()));
+  OutputXmlAttribute(stream, kTestsuites, "time",
+                     FormatTimeInMillisAsSeconds(unit_test.elapsed_time()));
+
+  if (GTEST_FLAG(shuffle)) {
+    OutputXmlAttribute(stream, kTestsuites, "random_seed",
+                       StreamableToString(unit_test.random_seed()));
+  }
+
+  *stream << TestPropertiesAsXmlAttributes(unit_test.ad_hoc_test_result());
+
+  OutputXmlAttribute(stream, kTestsuites, "name", "AllTests");
+  *stream << ">\n";
+
+  for (int i = 0; i < unit_test.total_test_case_count(); ++i) {
+    if (unit_test.GetTestCase(i)->reportable_test_count() > 0)
+      PrintXmlTestCase(stream, *unit_test.GetTestCase(i));
+  }
+  *stream << "</" << kTestsuites << ">\n";
+}
+
+// Produces a string representing the test properties in a result as space
+// delimited XML attributes based on the property key="value" pairs.
+std::string XmlUnitTestResultPrinter::TestPropertiesAsXmlAttributes(
+    const TestResult& result) {
+  Message attributes;
+  for (int i = 0; i < result.test_property_count(); ++i) {
+    const TestProperty& property = result.GetTestProperty(i);
+    attributes << " " << property.key() << "="
+        << "\"" << EscapeXmlAttribute(property.value()) << "\"";
+  }
+  return attributes.GetString();
+}
+
+// End XmlUnitTestResultPrinter
+
+#if GTEST_CAN_STREAM_RESULTS_
+
+// Checks if str contains '=', '&', '%' or '\n' characters. If yes,
+// replaces them by "%xx" where xx is their hexadecimal value. For
+// example, replaces "=" with "%3D".  This algorithm is O(strlen(str))
+// in both time and space -- important as the input str may contain an
+// arbitrarily long test failure message and stack trace.
+string StreamingListener::UrlEncode(const char* str) {
+  string result;
+  result.reserve(strlen(str) + 1);
+  for (char ch = *str; ch != '\0'; ch = *++str) {
+    switch (ch) {
+      case '%':
+      case '=':
+      case '&':
+      case '\n':
+        result.append("%" + String::FormatByte(static_cast<unsigned char>(ch)));
+        break;
+      default:
+        result.push_back(ch);
+        break;
+    }
+  }
+  return result;
+}
+
+void StreamingListener::SocketWriter::MakeConnection() {
+  GTEST_CHECK_(sockfd_ == -1)
+      << "MakeConnection() can't be called when there is already a connection.";
+
+  addrinfo hints;
+  memset(&hints, 0, sizeof(hints));
+  hints.ai_family = AF_UNSPEC;    // To allow both IPv4 and IPv6 addresses.
+  hints.ai_socktype = SOCK_STREAM;
+  addrinfo* servinfo = NULL;
+
+  // Use the getaddrinfo() to get a linked list of IP addresses for
+  // the given host name.
+  const int error_num = getaddrinfo(
+      host_name_.c_str(), port_num_.c_str(), &hints, &servinfo);
+  if (error_num != 0) {
+    GTEST_LOG_(WARNING) << "stream_result_to: getaddrinfo() failed: "
+                        << gai_strerror(error_num);
+  }
+
+  // Loop through all the results and connect to the first we can.
+  for (addrinfo* cur_addr = servinfo; sockfd_ == -1 && cur_addr != NULL;
+       cur_addr = cur_addr->ai_next) {
+    sockfd_ = socket(
+        cur_addr->ai_family, cur_addr->ai_socktype, cur_addr->ai_protocol);
+    if (sockfd_ != -1) {
+      // Connect the client socket to the server socket.
+      if (connect(sockfd_, cur_addr->ai_addr, cur_addr->ai_addrlen) == -1) {
+        close(sockfd_);
+        sockfd_ = -1;
+      }
+    }
+  }
+
+  freeaddrinfo(servinfo);  // all done with this structure
+
+  if (sockfd_ == -1) {
+    GTEST_LOG_(WARNING) << "stream_result_to: failed to connect to "
+                        << host_name_ << ":" << port_num_;
+  }
+}
+
+// End of class StreamingListener
+#endif  // GTEST_CAN_STREAM_RESULTS_
+
+// Class ScopedTrace
+
+// Pushes the given source file location and message onto a per-thread
+// trace stack maintained by Google Test.
+ScopedTrace::ScopedTrace(const char* file, int line, const Message& message)
+    GTEST_LOCK_EXCLUDED_(&UnitTest::mutex_) {
+  TraceInfo trace;
+  trace.file = file;
+  trace.line = line;
+  trace.message = message.GetString();
+
+  UnitTest::GetInstance()->PushGTestTrace(trace);
+}
+
+// Pops the info pushed by the c'tor.
+ScopedTrace::~ScopedTrace()
+    GTEST_LOCK_EXCLUDED_(&UnitTest::mutex_) {
+  UnitTest::GetInstance()->PopGTestTrace();
+}
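+
+// Illustrative sketch of the intended use: the SCOPED_TRACE macro constructs
+// a ScopedTrace so that failures raised in a helper function are annotated
+// with the call site, e.g.
+//
+//   void CheckPositive(int n) { EXPECT_GT(n, 0); }
+//
+//   TEST(TraceTest, Helpers) {
+//     SCOPED_TRACE("while checking the first input");
+//     CheckPositive(-1);  // Failure output includes the trace above.
+//   }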
+
+
+// class OsStackTraceGetter
+
+const char* const OsStackTraceGetterInterface::kElidedFramesMarker =
+    "... " GTEST_NAME_ " internal frames ...";
+
+string OsStackTraceGetter::CurrentStackTrace(int /*max_depth*/,
+                                             int /*skip_count*/) {
+  return "";
+}
+
+void OsStackTraceGetter::UponLeavingGTest() {}
+
+// A helper class that creates the premature-exit file in its
+// constructor and deletes the file in its destructor.
+class ScopedPrematureExitFile {
+ public:
+  explicit ScopedPrematureExitFile(const char* premature_exit_filepath)
+      : premature_exit_filepath_(premature_exit_filepath) {
+    // If a path to the premature-exit file is specified...
+    if (premature_exit_filepath != NULL && *premature_exit_filepath != '\0') {
+      // create the file with a single "0" character in it.  I/O
+      // errors are ignored as there's nothing better we can do and we
+      // don't want to fail the test because of this.
+      FILE* pfile = posix::FOpen(premature_exit_filepath, "w");
+      if (pfile != NULL) {
+        fwrite("0", 1, 1, pfile);
+        fclose(pfile);
+      }
+    }
+  }
+
+  ~ScopedPrematureExitFile() {
+    if (premature_exit_filepath_ != NULL && *premature_exit_filepath_ != '\0') {
+      remove(premature_exit_filepath_);
+    }
+  }
+
+ private:
+  const char* const premature_exit_filepath_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedPrematureExitFile);
+};
+
+}  // namespace internal
+
+// class TestEventListeners
+
+TestEventListeners::TestEventListeners()
+    : repeater_(new internal::TestEventRepeater()),
+      default_result_printer_(NULL),
+      default_xml_generator_(NULL) {
+}
+
+TestEventListeners::~TestEventListeners() { delete repeater_; }
+
+// Appends an event listener to the end of the list.  Google Test assumes
+// ownership of the listener (i.e. it will delete the listener when the test
+// program finishes).
+void TestEventListeners::Append(TestEventListener* listener) {
+  repeater_->Append(listener);
+}
+
+// Removes the given event listener from the list and returns it.  It then
+// becomes the caller's responsibility to delete the listener. Returns
+// NULL if the listener is not found in the list.
+TestEventListener* TestEventListeners::Release(TestEventListener* listener) {
+  if (listener == default_result_printer_)
+    default_result_printer_ = NULL;
+  else if (listener == default_xml_generator_)
+    default_xml_generator_ = NULL;
+  return repeater_->Release(listener);
+}
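+
+// Illustrative sketch of how Append() and Release() are intended to be used;
+// MyListener below is a hypothetical subclass, not part of Google Test:
+//
+//   class MyListener : public ::testing::EmptyTestEventListener {
+//    public:
+//     virtual void OnTestStart(const ::testing::TestInfo& test_info) {
+//       printf("*** Starting %s.%s\n",
+//              test_info.test_case_name(), test_info.name());
+//     }
+//   };
+//
+//   ::testing::UnitTest::GetInstance()->listeners().Append(new MyListener);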
+
+// Returns repeater that broadcasts the TestEventListener events to all
+// subscribers.
+TestEventListener* TestEventListeners::repeater() { return repeater_; }
+
+// Sets the default_result_printer attribute to the provided listener.
+// The listener is also added to the listener list and previous
+// default_result_printer is removed from it and deleted. The listener can
+// also be NULL in which case it will not be added to the list. Does
+// nothing if the previous and the current listener objects are the same.
+void TestEventListeners::SetDefaultResultPrinter(TestEventListener* listener) {
+  if (default_result_printer_ != listener) {
+    // It is an error to pass this method a listener that is already in the
+    // list.
+    delete Release(default_result_printer_);
+    default_result_printer_ = listener;
+    if (listener != NULL)
+      Append(listener);
+  }
+}
+
+// Sets the default_xml_generator attribute to the provided listener.  The
+// listener is also added to the listener list and previous
+// default_xml_generator is removed from it and deleted. The listener can
+// also be NULL in which case it will not be added to the list. Does
+// nothing if the previous and the current listener objects are the same.
+void TestEventListeners::SetDefaultXmlGenerator(TestEventListener* listener) {
+  if (default_xml_generator_ != listener) {
+    // It is an error to pass this method a listener that is already in the
+    // list.
+    delete Release(default_xml_generator_);
+    default_xml_generator_ = listener;
+    if (listener != NULL)
+      Append(listener);
+  }
+}
+
+// Controls whether events will be forwarded by the repeater to the
+// listeners in the list.
+bool TestEventListeners::EventForwardingEnabled() const {
+  return repeater_->forwarding_enabled();
+}
+
+void TestEventListeners::SuppressEventForwarding() {
+  repeater_->set_forwarding_enabled(false);
+}
+
+// class UnitTest
+
+// Gets the singleton UnitTest object.  The first time this method is
+// called, a UnitTest object is constructed and returned.  Consecutive
+// calls will return the same object.
+//
+// We don't protect this under mutex_ as a user is not supposed to
+// call this before main() starts, from which point on the return
+// value will never change.
+UnitTest* UnitTest::GetInstance() {
+  // When compiled with MSVC 7.1 in optimized mode, destroying the
+  // UnitTest object upon exiting the program messes up the exit code,
+  // causing successful tests to appear failed.  We have to use a
+  // different implementation in this case to bypass the compiler bug.
+  // This implementation makes the compiler happy, at the cost of
+  // leaking the UnitTest object.
+
+  // CodeGear C++Builder insists on a public destructor for the
+  // default implementation.  Use this implementation to keep good OO
+  // design with private destructor.
+
+#if (_MSC_VER == 1310 && !defined(_DEBUG)) || defined(__BORLANDC__)
+  static UnitTest* const instance = new UnitTest;
+  return instance;
+#else
+  static UnitTest instance;
+  return &instance;
+#endif  // (_MSC_VER == 1310 && !defined(_DEBUG)) || defined(__BORLANDC__)
+}
+
+// Gets the number of successful test cases.
+int UnitTest::successful_test_case_count() const {
+  return impl()->successful_test_case_count();
+}
+
+// Gets the number of failed test cases.
+int UnitTest::failed_test_case_count() const {
+  return impl()->failed_test_case_count();
+}
+
+// Gets the number of all test cases.
+int UnitTest::total_test_case_count() const {
+  return impl()->total_test_case_count();
+}
+
+// Gets the number of all test cases that contain at least one test
+// that should run.
+int UnitTest::test_case_to_run_count() const {
+  return impl()->test_case_to_run_count();
+}
+
+// Gets the number of successful tests.
+int UnitTest::successful_test_count() const {
+  return impl()->successful_test_count();
+}
+
+// Gets the number of failed tests.
+int UnitTest::failed_test_count() const { return impl()->failed_test_count(); }
+
+// Gets the number of disabled tests that will be reported in the XML report.
+int UnitTest::reportable_disabled_test_count() const {
+  return impl()->reportable_disabled_test_count();
+}
+
+// Gets the number of disabled tests.
+int UnitTest::disabled_test_count() const {
+  return impl()->disabled_test_count();
+}
+
+// Gets the number of tests to be printed in the XML report.
+int UnitTest::reportable_test_count() const {
+  return impl()->reportable_test_count();
+}
+
+// Gets the number of all tests.
+int UnitTest::total_test_count() const { return impl()->total_test_count(); }
+
+// Gets the number of tests that should run.
+int UnitTest::test_to_run_count() const { return impl()->test_to_run_count(); }
+
+// Gets the time of the test program start, in ms from the start of the
+// UNIX epoch.
+internal::TimeInMillis UnitTest::start_timestamp() const {
+  return impl()->start_timestamp();
+}
+
+// Gets the elapsed time, in milliseconds.
+internal::TimeInMillis UnitTest::elapsed_time() const {
+  return impl()->elapsed_time();
+}
+
+// Returns true iff the unit test passed (i.e. all test cases passed).
+bool UnitTest::Passed() const { return impl()->Passed(); }
+
+// Returns true iff the unit test failed (i.e. some test case failed
+// or something outside of all tests failed).
+bool UnitTest::Failed() const { return impl()->Failed(); }
+
+// Gets the i-th test case among all the test cases. i can range from 0 to
+// total_test_case_count() - 1. If i is not in that range, returns NULL.
+const TestCase* UnitTest::GetTestCase(int i) const {
+  return impl()->GetTestCase(i);
+}
+
+// Returns the TestResult containing information on test failures and
+// properties logged outside of individual test cases.
+const TestResult& UnitTest::ad_hoc_test_result() const {
+  return *impl()->ad_hoc_test_result();
+}
+
+// Gets the i-th test case among all the test cases. i can range from 0 to
+// total_test_case_count() - 1. If i is not in that range, returns NULL.
+TestCase* UnitTest::GetMutableTestCase(int i) {
+  return impl()->GetMutableTestCase(i);
+}
+
+// Returns the list of event listeners that can be used to track events
+// inside Google Test.
+TestEventListeners& UnitTest::listeners() {
+  return *impl()->listeners();
+}
+
+// Registers and returns a global test environment.  When a test
+// program is run, all global test environments will be set-up in the
+// order they were registered.  After all tests in the program have
+// finished, all global test environments will be torn-down in the
+// *reverse* order they were registered.
+//
+// The UnitTest object takes ownership of the given environment.
+//
+// We don't protect this under mutex_, as we only support calling it
+// from the main thread.
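+//
+// A typical use from user code (an illustrative sketch; MyEnvironment is a
+// hypothetical Environment subclass, not part of Google Test):
+//
+//   class MyEnvironment : public ::testing::Environment {
+//    public:
+//     virtual void SetUp() { /* acquire shared resources */ }
+//     virtual void TearDown() { /* release them */ }
+//   };
+//
+//   int main(int argc, char** argv) {
+//     ::testing::InitGoogleTest(&argc, argv);
+//     ::testing::AddGlobalTestEnvironment(new MyEnvironment);
+//     return RUN_ALL_TESTS();
+//   }
+//
+// AddGlobalTestEnvironment() simply forwards to this method.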
+Environment* UnitTest::AddEnvironment(Environment* env) {
+  if (env == NULL) {
+    return NULL;
+  }
+
+  impl_->environments().push_back(env);
+  return env;
+}
+
+// Adds a TestPartResult to the current TestResult object.  All Google Test
+// assertion macros (e.g. ASSERT_TRUE, EXPECT_EQ, etc) eventually call
+// this to report their results.  The user code should use the
+// assertion macros instead of calling this directly.
+void UnitTest::AddTestPartResult(
+    TestPartResult::Type result_type,
+    const char* file_name,
+    int line_number,
+    const std::string& message,
+    const std::string& os_stack_trace) GTEST_LOCK_EXCLUDED_(mutex_) {
+  Message msg;
+  msg << message;
+
+  internal::MutexLock lock(&mutex_);
+  if (impl_->gtest_trace_stack().size() > 0) {
+    msg << "\n" << GTEST_NAME_ << " trace:";
+
+    for (int i = static_cast<int>(impl_->gtest_trace_stack().size());
+         i > 0; --i) {
+      const internal::TraceInfo& trace = impl_->gtest_trace_stack()[i - 1];
+      msg << "\n" << internal::FormatFileLocation(trace.file, trace.line)
+          << " " << trace.message;
+    }
+  }
+
+  if (!os_stack_trace.empty()) {
+    msg << internal::kStackTraceMarker << os_stack_trace;
+  }
+
+  const TestPartResult result =
+    TestPartResult(result_type, file_name, line_number,
+                   msg.GetString().c_str());
+  impl_->GetTestPartResultReporterForCurrentThread()->
+      ReportTestPartResult(result);
+
+  if (result_type != TestPartResult::kSuccess) {
+    // gtest_break_on_failure takes precedence over
+    // gtest_throw_on_failure.  This allows a user to set the latter
+    // in the code (perhaps in order to use Google Test assertions
+    // with another testing framework) and specify the former on the
+    // command line for debugging.
+    if (GTEST_FLAG(break_on_failure)) {
+#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT
+      // Using DebugBreak on Windows allows gtest to still break into a debugger
+      // when a failure happens and both the --gtest_break_on_failure and
+      // the --gtest_catch_exceptions flags are specified.
+      DebugBreak();
+#else
+      // Dereference NULL through a volatile pointer to prevent the compiler
+      // from removing. We use this rather than abort() or __builtin_trap() for
+      // portability: Symbian doesn't implement abort() well, and some debuggers
+      // don't correctly trap abort().
+      *static_cast<volatile int*>(NULL) = 1;
+#endif  // GTEST_OS_WINDOWS
+    } else if (GTEST_FLAG(throw_on_failure)) {
+#if GTEST_HAS_EXCEPTIONS
+      throw internal::GoogleTestFailureException(result);
+#else
+      // We cannot call abort() as it generates a pop-up in debug mode
+      // that cannot be suppressed in VC 7.1 or below.
+      exit(1);
+#endif
+    }
+  }
+}
+
+// Adds a TestProperty to the current TestResult object when invoked from
+// inside a test, to the current TestCase's ad_hoc_test_result_ when invoked
+// from SetUpTestCase or TearDownTestCase, or to the global property set
+// when invoked elsewhere.  If the result already contains a property with
+// the same key, the value will be updated.
+void UnitTest::RecordProperty(const std::string& key,
+                              const std::string& value) {
+  impl_->RecordProperty(TestProperty(key, value));
+}
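+
+// Illustrative sketch: from inside a test body this is normally reached via
+// the static Test::RecordProperty() helper, e.g.
+//
+//   TEST(WidgetTest, Capacity) {
+//     RecordProperty("MaximumWidgets", 12);
+//   }
+//
+// which shows up as a MaximumWidgets="12" attribute on the corresponding
+// <testcase> element of the XML report.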
+
+// Runs all tests in this UnitTest object and prints the result.
+// Returns 0 if successful, or 1 otherwise.
+//
+// We don't protect this under mutex_, as we only support calling it
+// from the main thread.
+int UnitTest::Run() {
+  const bool in_death_test_child_process =
+      internal::GTEST_FLAG(internal_run_death_test).length() > 0;
+
+  // Google Test implements this protocol for catching that a test
+  // program exits before returning control to Google Test:
+  //
+  //   1. Upon start, Google Test creates a file whose absolute path
+  //      is specified by the environment variable
+  //      TEST_PREMATURE_EXIT_FILE.
+  //   2. When Google Test has finished its work, it deletes the file.
+  //
+  // This allows a test runner to set TEST_PREMATURE_EXIT_FILE before
+  // running a Google-Test-based test program and check the existence
+  // of the file at the end of the test execution to see if it has
+  // exited prematurely.
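+  //
+  // Illustrative sketch of how a POSIX test runner could use this protocol;
+  // the binary name and path below are hypothetical, and the snippet assumes
+  // <cstdlib> and <sys/stat.h>:
+  //
+  //   setenv("TEST_PREMATURE_EXIT_FILE", "/tmp/premature_exit", 1);
+  //   const int status = system("./my_test");
+  //   struct stat stat_buf;
+  //   const bool exited_prematurely =
+  //       stat("/tmp/premature_exit", &stat_buf) == 0;
+  //   (void)status;  // Interpret the test binary's exit code as usual.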
+
+  // If we are in the child process of a death test, don't
+  // create/delete the premature exit file, as doing so is unnecessary
+  // and will confuse the parent process.  Otherwise, create/delete
+  // the file upon entering/leaving this function.  If the program
+  // somehow exits before this function has a chance to return, the
+  // premature-exit file will be left undeleted, causing a test runner
+  // that understands the premature-exit-file protocol to report the
+  // test as having failed.
+  const internal::ScopedPrematureExitFile premature_exit_file(
+      in_death_test_child_process ?
+      NULL : internal::posix::GetEnv("TEST_PREMATURE_EXIT_FILE"));
+
+  // Captures the value of GTEST_FLAG(catch_exceptions).  This value will be
+  // used for the duration of the program.
+  impl()->set_catch_exceptions(GTEST_FLAG(catch_exceptions));
+
+#if GTEST_HAS_SEH
+  // Either the user wants Google Test to catch exceptions thrown by the
+  // tests or this is executing in the context of death test child
+  // process. In either case the user does not want to see pop-up dialogs
+  // about crashes - they are expected.
+  if (impl()->catch_exceptions() || in_death_test_child_process) {
+# if !GTEST_OS_WINDOWS_MOBILE && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT
+    // SetErrorMode doesn't exist on CE.
+    SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOALIGNMENTFAULTEXCEPT |
+                 SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX);
+# endif  // !GTEST_OS_WINDOWS_MOBILE
+
+# if (defined(_MSC_VER) || GTEST_OS_WINDOWS_MINGW) && !GTEST_OS_WINDOWS_MOBILE
+    // Death test children can be terminated with _abort().  On Windows,
+    // _abort() can show a dialog with a warning message.  This forces the
+    // abort message to go to stderr instead.
+    _set_error_mode(_OUT_TO_STDERR);
+# endif
+
+# if _MSC_VER >= 1400 && !GTEST_OS_WINDOWS_MOBILE
+    // In the debug version, Visual Studio pops up a separate dialog
+    // offering a choice to debug the aborted program. We need to suppress
+    // this dialog or it will pop up for every EXPECT/ASSERT_DEATH statement
+    // executed. Google Test will notify the user of any unexpected
+    // failure via stderr.
+    //
+    // VC++ doesn't define _set_abort_behavior() prior to the version 8.0.
+    // Users of prior VC versions shall suffer the agony and pain of
+    // clicking through the countless debug dialogs.
+    // TODO(vladl@google.com): find a way to suppress the abort dialog() in the
+    // debug mode when compiled with VC 7.1 or lower.
+    if (!GTEST_FLAG(break_on_failure))
+      _set_abort_behavior(
+          0x0,                                    // Clear the following flags:
+          _WRITE_ABORT_MSG | _CALL_REPORTFAULT);  // pop-up window, core dump.
+# endif
+  }
+#endif  // GTEST_HAS_SEH
+
+  return internal::HandleExceptionsInMethodIfSupported(
+      impl(),
+      &internal::UnitTestImpl::RunAllTests,
+      "auxiliary test code (environments or event listeners)") ? 0 : 1;
+}
+
+// Returns the working directory when the first TEST() or TEST_F() was
+// executed.
+const char* UnitTest::original_working_dir() const {
+  return impl_->original_working_dir_.c_str();
+}
+
+// Returns the TestCase object for the test that's currently running,
+// or NULL if no test is running.
+const TestCase* UnitTest::current_test_case() const
+    GTEST_LOCK_EXCLUDED_(mutex_) {
+  internal::MutexLock lock(&mutex_);
+  return impl_->current_test_case();
+}
+
+// Returns the TestInfo object for the test that's currently running,
+// or NULL if no test is running.
+const TestInfo* UnitTest::current_test_info() const
+    GTEST_LOCK_EXCLUDED_(mutex_) {
+  internal::MutexLock lock(&mutex_);
+  return impl_->current_test_info();
+}
+
+// Returns the random seed used at the start of the current test run.
+int UnitTest::random_seed() const { return impl_->random_seed(); }
+
+#if GTEST_HAS_PARAM_TEST
+// Returns ParameterizedTestCaseRegistry object used to keep track of
+// value-parameterized tests and instantiate and register them.
+internal::ParameterizedTestCaseRegistry&
+    UnitTest::parameterized_test_registry()
+        GTEST_LOCK_EXCLUDED_(mutex_) {
+  return impl_->parameterized_test_registry();
+}
+#endif  // GTEST_HAS_PARAM_TEST
+
+// Creates an empty UnitTest.
+UnitTest::UnitTest() {
+  impl_ = new internal::UnitTestImpl(this);
+}
+
+// Destructor of UnitTest.
+UnitTest::~UnitTest() {
+  delete impl_;
+}
+
+// Pushes a trace defined by SCOPED_TRACE() on to the per-thread
+// Google Test trace stack.
+void UnitTest::PushGTestTrace(const internal::TraceInfo& trace)
+    GTEST_LOCK_EXCLUDED_(mutex_) {
+  internal::MutexLock lock(&mutex_);
+  impl_->gtest_trace_stack().push_back(trace);
+}
+
+// Pops a trace from the per-thread Google Test trace stack.
+void UnitTest::PopGTestTrace()
+    GTEST_LOCK_EXCLUDED_(mutex_) {
+  internal::MutexLock lock(&mutex_);
+  impl_->gtest_trace_stack().pop_back();
+}
+
+namespace internal {
+
+UnitTestImpl::UnitTestImpl(UnitTest* parent)
+    : parent_(parent),
+      GTEST_DISABLE_MSC_WARNINGS_PUSH_(4355 /* using this in initializer */)
+      default_global_test_part_result_reporter_(this),
+      default_per_thread_test_part_result_reporter_(this),
+      GTEST_DISABLE_MSC_WARNINGS_POP_()
+      global_test_part_result_repoter_(
+          &default_global_test_part_result_reporter_),
+      per_thread_test_part_result_reporter_(
+          &default_per_thread_test_part_result_reporter_),
+#if GTEST_HAS_PARAM_TEST
+      parameterized_test_registry_(),
+      parameterized_tests_registered_(false),
+#endif  // GTEST_HAS_PARAM_TEST
+      last_death_test_case_(-1),
+      current_test_case_(NULL),
+      current_test_info_(NULL),
+      ad_hoc_test_result_(),
+      os_stack_trace_getter_(NULL),
+      post_flag_parse_init_performed_(false),
+      random_seed_(0),  // Will be overridden by the flag before first use.
+      random_(0),  // Will be reseeded before first use.
+      start_timestamp_(0),
+      elapsed_time_(0),
+#if GTEST_HAS_DEATH_TEST
+      death_test_factory_(new DefaultDeathTestFactory),
+#endif
+      // Will be overridden by the flag before first use.
+      catch_exceptions_(false) {
+  listeners()->SetDefaultResultPrinter(new PrettyUnitTestResultPrinter);
+}
+
+UnitTestImpl::~UnitTestImpl() {
+  // Deletes every TestCase.
+  ForEach(test_cases_, internal::Delete<TestCase>);
+
+  // Deletes every Environment.
+  ForEach(environments_, internal::Delete<Environment>);
+
+  delete os_stack_trace_getter_;
+}
+
+// Adds a TestProperty to the current TestResult object when invoked in the
+// context of a test, to the current test case's ad_hoc_test_result when
+// invoked from SetUpTestCase/TearDownTestCase, or to the global property set
+// otherwise.  If the result already contains a property with the same key,
+// the value will be updated.
+void UnitTestImpl::RecordProperty(const TestProperty& test_property) {
+  std::string xml_element;
+  TestResult* test_result;  // TestResult appropriate for property recording.
+
+  if (current_test_info_ != NULL) {
+    xml_element = "testcase";
+    test_result = &(current_test_info_->result_);
+  } else if (current_test_case_ != NULL) {
+    xml_element = "testsuite";
+    test_result = &(current_test_case_->ad_hoc_test_result_);
+  } else {
+    xml_element = "testsuites";
+    test_result = &ad_hoc_test_result_;
+  }
+  test_result->RecordProperty(xml_element, test_property);
+}
+
+#if GTEST_HAS_DEATH_TEST
+// Disables event forwarding if control is currently in a death test
+// subprocess. Must not be called before InitGoogleTest.
+void UnitTestImpl::SuppressTestEventsIfInSubprocess() {
+  if (internal_run_death_test_flag_.get() != NULL)
+    listeners()->SuppressEventForwarding();
+}
+#endif  // GTEST_HAS_DEATH_TEST
+
+// Initializes event listeners performing XML output as specified by
+// UnitTestOptions. Must not be called before InitGoogleTest.
+void UnitTestImpl::ConfigureXmlOutput() {
+  const std::string& output_format = UnitTestOptions::GetOutputFormat();
+  if (output_format == "xml") {
+    listeners()->SetDefaultXmlGenerator(new XmlUnitTestResultPrinter(
+        UnitTestOptions::GetAbsolutePathToOutputFile().c_str()));
+  } else if (output_format != "") {
+    printf("WARNING: unrecognized output format \"%s\" ignored.\n",
+           output_format.c_str());
+    fflush(stdout);
+  }
+}
+
+#if GTEST_CAN_STREAM_RESULTS_
+// Initializes event listeners for streaming test results in string form.
+// Must not be called before InitGoogleTest.
+void UnitTestImpl::ConfigureStreamingOutput() {
+  const std::string& target = GTEST_FLAG(stream_result_to);
+  if (!target.empty()) {
+    const size_t pos = target.find(':');
+    if (pos != std::string::npos) {
+      listeners()->Append(new StreamingListener(target.substr(0, pos),
+                                                target.substr(pos+1)));
+    } else {
+      printf("WARNING: unrecognized streaming target \"%s\" ignored.\n",
+             target.c_str());
+      fflush(stdout);
+    }
+  }
+}
+#endif  // GTEST_CAN_STREAM_RESULTS_
+
+// Performs initialization dependent upon flag values obtained in
+// ParseGoogleTestFlagsOnly.  It is called from InitGoogleTest after the call
+// to ParseGoogleTestFlagsOnly.  In case a user neglects to call
+// InitGoogleTest, this function is also called from RunAllTests.  Since this
+// function can be called more than once, it has to be idempotent.
+void UnitTestImpl::PostFlagParsingInit() {
+  // Ensures that this function does not execute more than once.
+  if (!post_flag_parse_init_performed_) {
+    post_flag_parse_init_performed_ = true;
+
+#if defined(GTEST_CUSTOM_TEST_EVENT_LISTENER_)
+    // Register to send notifications about key process state changes.
+    listeners()->Append(new GTEST_CUSTOM_TEST_EVENT_LISTENER_());
+#endif  // defined(GTEST_CUSTOM_TEST_EVENT_LISTENER_)
+
+#if GTEST_HAS_DEATH_TEST
+    InitDeathTestSubprocessControlInfo();
+    SuppressTestEventsIfInSubprocess();
+#endif  // GTEST_HAS_DEATH_TEST
+
+    // Registers parameterized tests. This makes parameterized tests
+    // available to the UnitTest reflection API without running
+    // RUN_ALL_TESTS.
+    RegisterParameterizedTests();
+
+    // Configures listeners for XML output. This makes it possible for users
+    // to shut down the default XML output before invoking RUN_ALL_TESTS.
+    ConfigureXmlOutput();
+
+#if GTEST_CAN_STREAM_RESULTS_
+    // Configures listeners for streaming test results to the specified server.
+    ConfigureStreamingOutput();
+#endif  // GTEST_CAN_STREAM_RESULTS_
+  }
+}
+
+// A predicate that checks the name of a TestCase against a known
+// value.
+//
+// This is used for implementation of the UnitTest class only.  We put
+// it in the anonymous namespace to prevent polluting the outer
+// namespace.
+//
+// TestCaseNameIs is copyable.
+class TestCaseNameIs {
+ public:
+  // Constructor.
+  explicit TestCaseNameIs(const std::string& name)
+      : name_(name) {}
+
+  // Returns true iff the name of test_case matches name_.
+  bool operator()(const TestCase* test_case) const {
+    return test_case != NULL && strcmp(test_case->name(), name_.c_str()) == 0;
+  }
+
+ private:
+  std::string name_;
+};
+
+// Finds and returns a TestCase with the given name.  If one doesn't
+// exist, creates one and returns it.  It's the CALLER'S
+// RESPONSIBILITY to ensure that this function is only called WHEN THE
+// TESTS ARE NOT SHUFFLED.
+//
+// Arguments:
+//
+//   test_case_name: name of the test case
+//   type_param:     the name of the test case's type parameter, or NULL if
+//                   this is not a typed or a type-parameterized test case.
+//   set_up_tc:      pointer to the function that sets up the test case
+//   tear_down_tc:   pointer to the function that tears down the test case
+TestCase* UnitTestImpl::GetTestCase(const char* test_case_name,
+                                    const char* type_param,
+                                    Test::SetUpTestCaseFunc set_up_tc,
+                                    Test::TearDownTestCaseFunc tear_down_tc) {
+  // Can we find a TestCase with the given name?
+  const std::vector<TestCase*>::const_iterator test_case =
+      std::find_if(test_cases_.begin(), test_cases_.end(),
+                   TestCaseNameIs(test_case_name));
+
+  if (test_case != test_cases_.end())
+    return *test_case;
+
+  // No.  Let's create one.
+  TestCase* const new_test_case =
+      new TestCase(test_case_name, type_param, set_up_tc, tear_down_tc);
+
+  // Is this a death test case?
+  if (internal::UnitTestOptions::MatchesFilter(test_case_name,
+                                               kDeathTestCaseFilter)) {
+    // Yes.  Inserts the test case after the last death test case
+    // defined so far.  This only works when the test cases haven't
+    // been shuffled.  Otherwise we may end up running a death test
+    // after a non-death test.
+    ++last_death_test_case_;
+    test_cases_.insert(test_cases_.begin() + last_death_test_case_,
+                       new_test_case);
+  } else {
+    // No.  Appends to the end of the list.
+    test_cases_.push_back(new_test_case);
+  }
+
+  test_case_indices_.push_back(static_cast<int>(test_case_indices_.size()));
+  return new_test_case;
+}
+
+// Helpers for setting up / tearing down the given environment.  They
+// are for use in the ForEach() function.
+static void SetUpEnvironment(Environment* env) { env->SetUp(); }
+static void TearDownEnvironment(Environment* env) { env->TearDown(); }
+
+// Runs all tests in this UnitTest object, prints the result, and
+// returns true if all tests are successful.  If any exception is
+// thrown during a test, the test is considered to be failed, but the
+// rest of the tests will still be run.
+//
+// When parameterized tests are enabled, it expands and registers
+// parameterized tests first in RegisterParameterizedTests().
+// All other functions called from RunAllTests() may safely assume that
+// parameterized tests are ready to be counted and run.
+bool UnitTestImpl::RunAllTests() {
+  // Makes sure InitGoogleTest() was called.
+  if (!GTestIsInitialized()) {
+    printf("%s",
+           "\nThis test program did NOT call ::testing::InitGoogleTest "
+           "before calling RUN_ALL_TESTS().  Please fix it.\n");
+    return false;
+  }
+
+  // Do not run any test if the --help flag was specified.
+  if (g_help_flag)
+    return true;
+
+  // Repeats the call to the post-flag parsing initialization in case the
+  // user didn't call InitGoogleTest.
+  PostFlagParsingInit();
+
+  // Even if sharding is not on, test runners may want to use the
+  // GTEST_SHARD_STATUS_FILE to query whether the test supports the sharding
+  // protocol.
+  internal::WriteToShardStatusFileIfNeeded();
+
+  // True iff we are in a subprocess for running a thread-safe-style
+  // death test.
+  bool in_subprocess_for_death_test = false;
+
+#if GTEST_HAS_DEATH_TEST
+  in_subprocess_for_death_test = (internal_run_death_test_flag_.get() != NULL);
+# if defined(GTEST_EXTRA_DEATH_TEST_CHILD_SETUP_)
+  if (in_subprocess_for_death_test) {
+    GTEST_EXTRA_DEATH_TEST_CHILD_SETUP_();
+  }
+# endif  // defined(GTEST_EXTRA_DEATH_TEST_CHILD_SETUP_)
+#endif  // GTEST_HAS_DEATH_TEST
+
+  const bool should_shard = ShouldShard(kTestTotalShards, kTestShardIndex,
+                                        in_subprocess_for_death_test);
+
+  // Compares the full test names with the filter to decide which
+  // tests to run.
+  const bool has_tests_to_run = FilterTests(should_shard
+                                              ? HONOR_SHARDING_PROTOCOL
+                                              : IGNORE_SHARDING_PROTOCOL) > 0;
+
+  // Lists the tests and exits if the --gtest_list_tests flag was specified.
+  if (GTEST_FLAG(list_tests)) {
+    // This must be called *after* FilterTests() has been called.
+    ListTestsMatchingFilter();
+    return true;
+  }
+
+  random_seed_ = GTEST_FLAG(shuffle) ?
+      GetRandomSeedFromFlag(GTEST_FLAG(random_seed)) : 0;
+
+  // True iff at least one test has failed.
+  bool failed = false;
+
+  TestEventListener* repeater = listeners()->repeater();
+
+  start_timestamp_ = GetTimeInMillis();
+  repeater->OnTestProgramStart(*parent_);
+
+  // How many times to repeat the tests?  We don't want to repeat them
+  // when we are inside the subprocess of a death test.
+  const int repeat = in_subprocess_for_death_test ? 1 : GTEST_FLAG(repeat);
+  // Repeats forever if the repeat count is negative.
+  const bool forever = repeat < 0;
+  for (int i = 0; forever || i != repeat; i++) {
+    // We want to preserve failures generated by ad-hoc test
+    // assertions executed before RUN_ALL_TESTS().
+    ClearNonAdHocTestResult();
+
+    const TimeInMillis start = GetTimeInMillis();
+
+    // Shuffles test cases and tests if requested.
+    if (has_tests_to_run && GTEST_FLAG(shuffle)) {
+      random()->Reseed(random_seed_);
+      // This should be done before calling OnTestIterationStart(),
+      // such that a test event listener can see the actual test order
+      // in the event.
+      ShuffleTests();
+    }
+
+    // Tells the unit test event listeners that the tests are about to start.
+    repeater->OnTestIterationStart(*parent_, i);
+
+    // Runs each test case if there is at least one test to run.
+    if (has_tests_to_run) {
+      // Sets up all environments beforehand.
+      repeater->OnEnvironmentsSetUpStart(*parent_);
+      ForEach(environments_, SetUpEnvironment);
+      repeater->OnEnvironmentsSetUpEnd(*parent_);
+
+      // Runs the tests only if there was no fatal failure during global
+      // set-up.
+      if (!Test::HasFatalFailure()) {
+        for (int test_index = 0; test_index < total_test_case_count();
+             test_index++) {
+          GetMutableTestCase(test_index)->Run();
+        }
+      }
+
+      // Tears down all environments in reverse order afterwards.
+      repeater->OnEnvironmentsTearDownStart(*parent_);
+      std::for_each(environments_.rbegin(), environments_.rend(),
+                    TearDownEnvironment);
+      repeater->OnEnvironmentsTearDownEnd(*parent_);
+    }
+
+    elapsed_time_ = GetTimeInMillis() - start;
+
+    // Tells the unit test event listener that the tests have just finished.
+    repeater->OnTestIterationEnd(*parent_, i);
+
+    // Gets the result and clears it.
+    if (!Passed()) {
+      failed = true;
+    }
+
+    // Restores the original test order after the iteration.  This
+    // allows the user to quickly repro a failure that happens in the
+    // N-th iteration without repeating the first (N - 1) iterations.
+    // This is not enclosed in "if (GTEST_FLAG(shuffle)) { ... }", in
+    // case the user somehow changes the value of the flag somewhere
+    // (it's always safe to unshuffle the tests).
+    UnshuffleTests();
+
+    if (GTEST_FLAG(shuffle)) {
+      // Picks a new random seed for each iteration.
+      random_seed_ = GetNextRandomSeed(random_seed_);
+    }
+  }
+
+  repeater->OnTestProgramEnd(*parent_);
+
+  return !failed;
+}
+
+// Reads the GTEST_SHARD_STATUS_FILE environment variable, and creates the file
+// if the variable is present. If a file already exists at this location, this
+// function will write over it. If the variable is present, but the file cannot
+// be created, prints an error and exits.
+void WriteToShardStatusFileIfNeeded() {
+  const char* const test_shard_file = posix::GetEnv(kTestShardStatusFile);
+  if (test_shard_file != NULL) {
+    FILE* const file = posix::FOpen(test_shard_file, "w");
+    if (file == NULL) {
+      ColoredPrintf(COLOR_RED,
+                    "Could not write to the test shard status file \"%s\" "
+                    "specified by the %s environment variable.\n",
+                    test_shard_file, kTestShardStatusFile);
+      fflush(stdout);
+      exit(EXIT_FAILURE);
+    }
+    fclose(file);
+  }
+}
+
+// Checks whether sharding is enabled by examining the relevant
+// environment variable values. If the variables are present,
+// but inconsistent (i.e., shard_index >= total_shards), prints
+// an error and exits. If in_subprocess_for_death_test, sharding is
+// disabled because it must only be applied to the original test
+// process. Otherwise, we could filter out death tests we intended to execute.
+bool ShouldShard(const char* total_shards_env,
+                 const char* shard_index_env,
+                 bool in_subprocess_for_death_test) {
+  if (in_subprocess_for_death_test) {
+    return false;
+  }
+
+  const Int32 total_shards = Int32FromEnvOrDie(total_shards_env, -1);
+  const Int32 shard_index = Int32FromEnvOrDie(shard_index_env, -1);
+
+  if (total_shards == -1 && shard_index == -1) {
+    return false;
+  } else if (total_shards == -1 && shard_index != -1) {
+    const Message msg = Message()
+      << "Invalid environment variables: you have "
+      << kTestShardIndex << " = " << shard_index
+      << ", but have left " << kTestTotalShards << " unset.\n";
+    ColoredPrintf(COLOR_RED, "%s", msg.GetString().c_str());
+    fflush(stdout);
+    exit(EXIT_FAILURE);
+  } else if (total_shards != -1 && shard_index == -1) {
+    const Message msg = Message()
+      << "Invalid environment variables: you have "
+      << kTestTotalShards << " = " << total_shards
+      << ", but have left " << kTestShardIndex << " unset.\n";
+    ColoredPrintf(COLOR_RED, "%s", msg.GetString().c_str());
+    fflush(stdout);
+    exit(EXIT_FAILURE);
+  } else if (shard_index < 0 || shard_index >= total_shards) {
+    const Message msg = Message()
+      << "Invalid environment variables: we require 0 <= "
+      << kTestShardIndex << " < " << kTestTotalShards
+      << ", but you have " << kTestShardIndex << "=" << shard_index
+      << ", " << kTestTotalShards << "=" << total_shards << ".\n";
+    ColoredPrintf(COLOR_RED, "%s", msg.GetString().c_str());
+    fflush(stdout);
+    exit(EXIT_FAILURE);
+  }
+
+  return total_shards > 1;
+}
+
+// Parses the environment variable var as an Int32. If it is unset,
+// returns default_val. If it is not an Int32, prints an error
+// and aborts.
+Int32 Int32FromEnvOrDie(const char* var, Int32 default_val) {
+  const char* str_val = posix::GetEnv(var);
+  if (str_val == NULL) {
+    return default_val;
+  }
+
+  Int32 result;
+  if (!ParseInt32(Message() << "The value of environment variable " << var,
+                  str_val, &result)) {
+    exit(EXIT_FAILURE);
+  }
+  return result;
+}
+
+// Given the total number of shards, the shard index, and the test id,
+// returns true iff the test should be run on this shard. The test id is
+// some arbitrary but unique non-negative integer assigned to each test
+// method. Assumes that 0 <= shard_index < total_shards.
+bool ShouldRunTestOnShard(int total_shards, int shard_index, int test_id) {
+  return (test_id % total_shards) == shard_index;
+}
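+
+// A quick illustration of the modulo assignment above (an editorial sketch
+// with made-up numbers): with total_shards == 3 and shard_index == 1, the
+// tests whose ids leave remainder 1 are selected on this shard.
+//
+//   ShouldRunTestOnShard(3, 1, 4);  // true,  since 4 % 3 == 1
+//   ShouldRunTestOnShard(3, 1, 5);  // false, since 5 % 3 == 2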
+
+// Compares the name of each test with the user-specified filter to
+// decide whether the test should be run, then records the result in
+// each TestCase and TestInfo object.
+// If shard_tests == true, further filters tests based on sharding
+// variables in the environment - see
+// http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide.
+// Returns the number of tests that should run.
+int UnitTestImpl::FilterTests(ReactionToSharding shard_tests) {
+  const Int32 total_shards = shard_tests == HONOR_SHARDING_PROTOCOL ?
+      Int32FromEnvOrDie(kTestTotalShards, -1) : -1;
+  const Int32 shard_index = shard_tests == HONOR_SHARDING_PROTOCOL ?
+      Int32FromEnvOrDie(kTestShardIndex, -1) : -1;
+
+  // num_runnable_tests is the number of tests that will
+  // run across all shards (i.e., match filter and are not disabled).
+  // num_selected_tests is the number of tests to be run on
+  // this shard.
+  int num_runnable_tests = 0;
+  int num_selected_tests = 0;
+  for (size_t i = 0; i < test_cases_.size(); i++) {
+    TestCase* const test_case = test_cases_[i];
+    const std::string &test_case_name = test_case->name();
+    test_case->set_should_run(false);
+
+    for (size_t j = 0; j < test_case->test_info_list().size(); j++) {
+      TestInfo* const test_info = test_case->test_info_list()[j];
+      const std::string test_name(test_info->name());
+      // A test is disabled if test case name or test name matches
+      // kDisableTestFilter.
+      const bool is_disabled =
+          internal::UnitTestOptions::MatchesFilter(test_case_name,
+                                                   kDisableTestFilter) ||
+          internal::UnitTestOptions::MatchesFilter(test_name,
+                                                   kDisableTestFilter);
+      test_info->is_disabled_ = is_disabled;
+
+      const bool matches_filter =
+          internal::UnitTestOptions::FilterMatchesTest(test_case_name,
+                                                       test_name);
+      test_info->matches_filter_ = matches_filter;
+
+      const bool is_runnable =
+          (GTEST_FLAG(also_run_disabled_tests) || !is_disabled) &&
+          matches_filter;
+
+      const bool is_selected = is_runnable &&
+          (shard_tests == IGNORE_SHARDING_PROTOCOL ||
+           ShouldRunTestOnShard(total_shards, shard_index,
+                                num_runnable_tests));
+
+      num_runnable_tests += is_runnable;
+      num_selected_tests += is_selected;
+
+      test_info->should_run_ = is_selected;
+      test_case->set_should_run(test_case->should_run() || is_selected);
+    }
+  }
+  return num_selected_tests;
+}
+
+// Prints the given C-string on a single line by replacing all '\n'
+// characters with string "\\n".  If the output takes more than
+// max_length characters, only prints the first max_length characters
+// and "...".
+static void PrintOnOneLine(const char* str, int max_length) {
+  if (str != NULL) {
+    for (int i = 0; *str != '\0'; ++str) {
+      if (i >= max_length) {
+        printf("...");
+        break;
+      }
+      if (*str == '\n') {
+        printf("\\n");
+        i += 2;
+      } else {
+        printf("%c", *str);
+        ++i;
+      }
+    }
+  }
+}
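+
+// For instance (illustrative inputs), PrintOnOneLine("ab\ncd", 250) prints
+// "ab\ncd" with the newline rendered as the two characters '\' and 'n',
+// while PrintOnOneLine("abcdef", 3) prints "abc...".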
+
+// Prints the names of the tests matching the user-specified filter flag.
+void UnitTestImpl::ListTestsMatchingFilter() {
+  // Print at most this many characters for each type/value parameter.
+  const int kMaxParamLength = 250;
+
+  for (size_t i = 0; i < test_cases_.size(); i++) {
+    const TestCase* const test_case = test_cases_[i];
+    bool printed_test_case_name = false;
+
+    for (size_t j = 0; j < test_case->test_info_list().size(); j++) {
+      const TestInfo* const test_info =
+          test_case->test_info_list()[j];
+      if (test_info->matches_filter_) {
+        if (!printed_test_case_name) {
+          printed_test_case_name = true;
+          printf("%s.", test_case->name());
+          if (test_case->type_param() != NULL) {
+            printf("  # %s = ", kTypeParamLabel);
+            // We print the type parameter on a single line to make
+            // the output easy to parse by a program.
+            PrintOnOneLine(test_case->type_param(), kMaxParamLength);
+          }
+          printf("\n");
+        }
+        printf("  %s", test_info->name());
+        if (test_info->value_param() != NULL) {
+          printf("  # %s = ", kValueParamLabel);
+          // We print the value parameter on a single line to make the
+          // output easy to parse by a program.
+          PrintOnOneLine(test_info->value_param(), kMaxParamLength);
+        }
+        printf("\n");
+      }
+    }
+  }
+  fflush(stdout);
+}
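+
+// With illustrative test names, TEST(FooTest, DoesBar) and
+// TEST(FooTest, DoesBaz) matching the filter are listed as:
+//
+//   FooTest.
+//     DoesBar
+//     DoesBaz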
+
+// Sets the OS stack trace getter.
+//
+// Does nothing if the input and the current OS stack trace getter are
+// the same; otherwise, deletes the old getter and makes the input the
+// current getter.
+void UnitTestImpl::set_os_stack_trace_getter(
+    OsStackTraceGetterInterface* getter) {
+  if (os_stack_trace_getter_ != getter) {
+    delete os_stack_trace_getter_;
+    os_stack_trace_getter_ = getter;
+  }
+}
+
+// Returns the current OS stack trace getter if it is not NULL;
+// otherwise, creates an OsStackTraceGetter, makes it the current
+// getter, and returns it.
+OsStackTraceGetterInterface* UnitTestImpl::os_stack_trace_getter() {
+  if (os_stack_trace_getter_ == NULL) {
+#ifdef GTEST_OS_STACK_TRACE_GETTER_
+    os_stack_trace_getter_ = new GTEST_OS_STACK_TRACE_GETTER_;
+#else
+    os_stack_trace_getter_ = new OsStackTraceGetter;
+#endif  // GTEST_OS_STACK_TRACE_GETTER_
+  }
+
+  return os_stack_trace_getter_;
+}
+
+// Returns the TestResult for the test that's currently running, or
+// the TestResult for the ad hoc test if no test is running.
+TestResult* UnitTestImpl::current_test_result() {
+  return current_test_info_ ?
+      &(current_test_info_->result_) : &ad_hoc_test_result_;
+}
+
+// Shuffles all test cases, and the tests within each test case,
+// making sure that death tests are still run first.
+void UnitTestImpl::ShuffleTests() {
+  // Shuffles the death test cases.
+  ShuffleRange(random(), 0, last_death_test_case_ + 1, &test_case_indices_);
+
+  // Shuffles the non-death test cases.
+  ShuffleRange(random(), last_death_test_case_ + 1,
+               static_cast<int>(test_cases_.size()), &test_case_indices_);
+
+  // Shuffles the tests inside each test case.
+  for (size_t i = 0; i < test_cases_.size(); i++) {
+    test_cases_[i]->ShuffleTests(random());
+  }
+}
+
+// Restores the test cases and tests to their order before the first shuffle.
+void UnitTestImpl::UnshuffleTests() {
+  for (size_t i = 0; i < test_cases_.size(); i++) {
+    // Unshuffles the tests in each test case.
+    test_cases_[i]->UnshuffleTests();
+    // Resets the index of each test case.
+    test_case_indices_[i] = static_cast<int>(i);
+  }
+}
+
+// Returns the current OS stack trace as an std::string.
+//
+// The maximum number of stack frames to be included is specified by
+// the gtest_stack_trace_depth flag.  The skip_count parameter
+// specifies the number of top frames to be skipped, which doesn't
+// count against the number of frames to be included.
+//
+// For example, if Foo() calls Bar(), which in turn calls
+// GetCurrentOsStackTraceExceptTop(..., 1), Foo() will be included in
+// the trace but Bar() and GetCurrentOsStackTraceExceptTop() won't.
+std::string GetCurrentOsStackTraceExceptTop(UnitTest* /*unit_test*/,
+                                            int skip_count) {
+  // We pass skip_count + 1 to skip this wrapper function in addition
+  // to what the user really wants to skip.
+  return GetUnitTestImpl()->CurrentOsStackTraceExceptTop(skip_count + 1);
+}
+
+// Used by the GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_ macro to
+// suppress unreachable code warnings.
+namespace {
+class ClassUniqueToAlwaysTrue {};
+}
+
+bool IsTrue(bool condition) { return condition; }
+
+bool AlwaysTrue() {
+#if GTEST_HAS_EXCEPTIONS
+  // This condition is always false so AlwaysTrue() never actually throws,
+  // but it makes the compiler think that it may throw.
+  if (IsTrue(false))
+    throw ClassUniqueToAlwaysTrue();
+#endif  // GTEST_HAS_EXCEPTIONS
+  return true;
+}
+
+// If *pstr starts with the given prefix, modifies *pstr to be right
+// past the prefix and returns true; otherwise leaves *pstr unchanged
+// and returns false.  None of pstr, *pstr, and prefix can be NULL.
+bool SkipPrefix(const char* prefix, const char** pstr) {
+  const size_t prefix_len = strlen(prefix);
+  if (strncmp(*pstr, prefix, prefix_len) == 0) {
+    *pstr += prefix_len;
+    return true;
+  }
+  return false;
+}
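+
+// Usage sketch (illustrative):
+//
+//   const char* p = "--gtest_filter";
+//   SkipPrefix("--", &p);  // returns true; p now points at "gtest_filter".
+//   SkipPrefix("++", &p);  // returns false; p is unchanged.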
+
+// Parses a string as a command line flag.  The string should have
+// the format "--flag=value".  When def_optional is true, the "=value"
+// part can be omitted.
+//
+// Returns the value of the flag, or NULL if the parsing failed.
+const char* ParseFlagValue(const char* str,
+                           const char* flag,
+                           bool def_optional) {
+  // str and flag must not be NULL.
+  if (str == NULL || flag == NULL) return NULL;
+
+  // The flag must start with "--" followed by GTEST_FLAG_PREFIX_.
+  const std::string flag_str = std::string("--") + GTEST_FLAG_PREFIX_ + flag;
+  const size_t flag_len = flag_str.length();
+  if (strncmp(str, flag_str.c_str(), flag_len) != 0) return NULL;
+
+  // Skips the flag name.
+  const char* flag_end = str + flag_len;
+
+  // When def_optional is true, it's OK to not have a "=value" part.
+  if (def_optional && (flag_end[0] == '\0')) {
+    return flag_end;
+  }
+
+  // If def_optional is true and there are more characters after the
+  // flag name, or if def_optional is false, there must be a '=' after
+  // the flag name.
+  if (flag_end[0] != '=') return NULL;
+
+  // Returns the string after "=".
+  return flag_end + 1;
+}
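+
+// A few illustrative calls, assuming GTEST_FLAG_PREFIX_ expands to "gtest_":
+//
+//   ParseFlagValue("--gtest_output=xml", "output", false);     // "xml"
+//   ParseFlagValue("--gtest_list_tests", "list_tests", true);  // "" (empty)
+//   ParseFlagValue("--gtest_output", "output", false);         // NULL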
+
+// Parses a string for a bool flag, in the form of either
+// "--flag=value" or "--flag".
+//
+// In the former case, the value is taken as true as long as it does
+// not start with '0', 'f', or 'F'.
+//
+// In the latter case, the value is taken as true.
+//
+// On success, stores the value of the flag in *value, and returns
+// true.  On failure, returns false without changing *value.
+bool ParseBoolFlag(const char* str, const char* flag, bool* value) {
+  // Gets the value of the flag as a string.
+  const char* const value_str = ParseFlagValue(str, flag, true);
+
+  // Aborts if the parsing failed.
+  if (value_str == NULL) return false;
+
+  // Converts the string value to a bool.
+  *value = !(*value_str == '0' || *value_str == 'f' || *value_str == 'F');
+  return true;
+}
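+
+// For example (illustrative, again assuming the "gtest_" prefix):
+//
+//   bool shuffle = false;
+//   ParseBoolFlag("--gtest_shuffle", "shuffle", &shuffle);    // stores true
+//   ParseBoolFlag("--gtest_shuffle=0", "shuffle", &shuffle);  // stores false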
+
+// Parses a string for an Int32 flag, in the form of
+// "--flag=value".
+//
+// On success, stores the value of the flag in *value, and returns
+// true.  On failure, returns false without changing *value.
+bool ParseInt32Flag(const char* str, const char* flag, Int32* value) {
+  // Gets the value of the flag as a string.
+  const char* const value_str = ParseFlagValue(str, flag, false);
+
+  // Aborts if the parsing failed.
+  if (value_str == NULL) return false;
+
+  // Sets *value to the value of the flag.
+  return ParseInt32(Message() << "The value of flag --" << flag,
+                    value_str, value);
+}
+
+// Parses a string for a string flag, in the form of
+// "--flag=value".
+//
+// On success, stores the value of the flag in *value, and returns
+// true.  On failure, returns false without changing *value.
+bool ParseStringFlag(const char* str, const char* flag, std::string* value) {
+  // Gets the value of the flag as a string.
+  const char* const value_str = ParseFlagValue(str, flag, false);
+
+  // Aborts if the parsing failed.
+  if (value_str == NULL) return false;
+
+  // Sets *value to the value of the flag.
+  *value = value_str;
+  return true;
+}
+
+// Determines whether a string has a prefix that Google Test uses for its
+// flags, i.e., starts with GTEST_FLAG_PREFIX_ or GTEST_FLAG_PREFIX_DASH_.
+// If Google Test detects that a command line flag has its prefix but is not
+// recognized, it will print its help message. Flags starting with
+// GTEST_INTERNAL_PREFIX_ followed by "internal_" are considered Google Test
+// internal flags and do not trigger the help message.
+static bool HasGoogleTestFlagPrefix(const char* str) {
+  return (SkipPrefix("--", &str) ||
+          SkipPrefix("-", &str) ||
+          SkipPrefix("/", &str)) &&
+         !SkipPrefix(GTEST_FLAG_PREFIX_ "internal_", &str) &&
+         (SkipPrefix(GTEST_FLAG_PREFIX_, &str) ||
+          SkipPrefix(GTEST_FLAG_PREFIX_DASH_, &str));
+}
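+
+// Illustrative behavior, assuming the standard "gtest_" flag prefix:
+//
+//   HasGoogleTestFlagPrefix("--gtest_repeat=2");                 // true
+//   HasGoogleTestFlagPrefix("--gtest_internal_run_death_test");  // false
+//   HasGoogleTestFlagPrefix("--verbose");                        // false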
+
+// Prints a string containing code-encoded text.  The following escape
+// sequences can be used in the string to control the text color:
+//
+//   @@    prints a single '@' character.
+//   @R    changes the color to red.
+//   @G    changes the color to green.
+//   @Y    changes the color to yellow.
+//   @D    changes to the default terminal text color.
+//
+// TODO(wan@google.com): Write tests for this once we add stdout
+// capturing to Google Test.
+static void PrintColorEncoded(const char* str) {
+  GTestColor color = COLOR_DEFAULT;  // The current color.
+
+  // Conceptually, we split the string into segments divided by escape
+  // sequences.  Then we print one segment at a time.  At the end of
+  // each iteration, the str pointer advances to the beginning of the
+  // next segment.
+  for (;;) {
+    const char* p = strchr(str, '@');
+    if (p == NULL) {
+      ColoredPrintf(color, "%s", str);
+      return;
+    }
+
+    ColoredPrintf(color, "%s", std::string(str, p).c_str());
+
+    const char ch = p[1];
+    str = p + 2;
+    if (ch == '@') {
+      ColoredPrintf(color, "@");
+    } else if (ch == 'D') {
+      color = COLOR_DEFAULT;
+    } else if (ch == 'R') {
+      color = COLOR_RED;
+    } else if (ch == 'G') {
+      color = COLOR_GREEN;
+    } else if (ch == 'Y') {
+      color = COLOR_YELLOW;
+    } else {
+      --str;
+    }
+  }
+}
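+
+// For instance (illustrative string), PrintColorEncoded("@Gok@D and @Rbad@D\n")
+// prints "ok" in green, " and " in the default color, and "bad" in red,
+// followed by a newline in the default color.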
+
+static const char kColorEncodedHelpMessage[] =
+"This program contains tests written using " GTEST_NAME_ ". You can use the\n"
+"following command line flags to control its behavior:\n"
+"\n"
+"Test Selection:\n"
+"  @G--" GTEST_FLAG_PREFIX_ "list_tests@D\n"
+"      List the names of all tests instead of running them. The name of\n"
+"      TEST(Foo, Bar) is \"Foo.Bar\".\n"
+"  @G--" GTEST_FLAG_PREFIX_ "filter=@YPOSTIVE_PATTERNS"
+    "[@G-@YNEGATIVE_PATTERNS]@D\n"
+"      Run only the tests whose name matches one of the positive patterns but\n"
+"      none of the negative patterns. '?' matches any single character; '*'\n"
+"      matches any substring; ':' separates two patterns.\n"
+"  @G--" GTEST_FLAG_PREFIX_ "also_run_disabled_tests@D\n"
+"      Run all disabled tests too.\n"
+"\n"
+"Test Execution:\n"
+"  @G--" GTEST_FLAG_PREFIX_ "repeat=@Y[COUNT]@D\n"
+"      Run the tests repeatedly; use a negative count to repeat forever.\n"
+"  @G--" GTEST_FLAG_PREFIX_ "shuffle@D\n"
+"      Randomize tests' orders on every iteration.\n"
+"  @G--" GTEST_FLAG_PREFIX_ "random_seed=@Y[NUMBER]@D\n"
+"      Random number seed to use for shuffling test orders (between 1 and\n"
+"      99999, or 0 to use a seed based on the current time).\n"
+"\n"
+"Test Output:\n"
+"  @G--" GTEST_FLAG_PREFIX_ "color=@Y(@Gyes@Y|@Gno@Y|@Gauto@Y)@D\n"
+"      Enable/disable colored output. The default is @Gauto@D.\n"
+"  -@G-" GTEST_FLAG_PREFIX_ "print_time=0@D\n"
+"      Don't print the elapsed time of each test.\n"
+"  @G--" GTEST_FLAG_PREFIX_ "output=xml@Y[@G:@YDIRECTORY_PATH@G"
+    GTEST_PATH_SEP_ "@Y|@G:@YFILE_PATH]@D\n"
+"      Generate an XML report in the given directory or with the given file\n"
+"      name. @YFILE_PATH@D defaults to @Gtest_details.xml@D.\n"
+#if GTEST_CAN_STREAM_RESULTS_
+"  @G--" GTEST_FLAG_PREFIX_ "stream_result_to=@YHOST@G:@YPORT@D\n"
+"      Stream test results to the given server.\n"
+#endif  // GTEST_CAN_STREAM_RESULTS_
+"\n"
+"Assertion Behavior:\n"
+#if GTEST_HAS_DEATH_TEST && !GTEST_OS_WINDOWS
+"  @G--" GTEST_FLAG_PREFIX_ "death_test_style=@Y(@Gfast@Y|@Gthreadsafe@Y)@D\n"
+"      Set the default death test style.\n"
+#endif  // GTEST_HAS_DEATH_TEST && !GTEST_OS_WINDOWS
+"  @G--" GTEST_FLAG_PREFIX_ "break_on_failure@D\n"
+"      Turn assertion failures into debugger break-points.\n"
+"  @G--" GTEST_FLAG_PREFIX_ "throw_on_failure@D\n"
+"      Turn assertion failures into C++ exceptions.\n"
+"  @G--" GTEST_FLAG_PREFIX_ "catch_exceptions=0@D\n"
+"      Do not report exceptions as test failures. Instead, allow them\n"
+"      to crash the program or throw a pop-up (on Windows).\n"
+"\n"
+"Except for @G--" GTEST_FLAG_PREFIX_ "list_tests@D, you can alternatively set "
+    "the corresponding\n"
+"environment variable of a flag (all letters in upper-case). For example, to\n"
+"disable colored text output, you can either specify @G--" GTEST_FLAG_PREFIX_
+    "color=no@D or set\n"
+"the @G" GTEST_FLAG_PREFIX_UPPER_ "COLOR@D environment variable to @Gno@D.\n"
+"\n"
+"For more information, please read the " GTEST_NAME_ " documentation at\n"
+"@G" GTEST_PROJECT_URL_ "@D. If you find a bug in " GTEST_NAME_ "\n"
+"(not one in your own code or tests), please report it to\n"
+"@G<" GTEST_DEV_EMAIL_ ">@D.\n";
+
+bool ParseGoogleTestFlag(const char* const arg) {
+  return ParseBoolFlag(arg, kAlsoRunDisabledTestsFlag,
+                       &GTEST_FLAG(also_run_disabled_tests)) ||
+      ParseBoolFlag(arg, kBreakOnFailureFlag,
+                    &GTEST_FLAG(break_on_failure)) ||
+      ParseBoolFlag(arg, kCatchExceptionsFlag,
+                    &GTEST_FLAG(catch_exceptions)) ||
+      ParseStringFlag(arg, kColorFlag, &GTEST_FLAG(color)) ||
+      ParseStringFlag(arg, kDeathTestStyleFlag,
+                      &GTEST_FLAG(death_test_style)) ||
+      ParseBoolFlag(arg, kDeathTestUseFork,
+                    &GTEST_FLAG(death_test_use_fork)) ||
+      ParseStringFlag(arg, kFilterFlag, &GTEST_FLAG(filter)) ||
+      ParseStringFlag(arg, kInternalRunDeathTestFlag,
+                      &GTEST_FLAG(internal_run_death_test)) ||
+      ParseBoolFlag(arg, kListTestsFlag, &GTEST_FLAG(list_tests)) ||
+      ParseStringFlag(arg, kOutputFlag, &GTEST_FLAG(output)) ||
+      ParseBoolFlag(arg, kPrintTimeFlag, &GTEST_FLAG(print_time)) ||
+      ParseInt32Flag(arg, kRandomSeedFlag, &GTEST_FLAG(random_seed)) ||
+      ParseInt32Flag(arg, kRepeatFlag, &GTEST_FLAG(repeat)) ||
+      ParseBoolFlag(arg, kShuffleFlag, &GTEST_FLAG(shuffle)) ||
+      ParseInt32Flag(arg, kStackTraceDepthFlag,
+                     &GTEST_FLAG(stack_trace_depth)) ||
+      ParseStringFlag(arg, kStreamResultToFlag,
+                      &GTEST_FLAG(stream_result_to)) ||
+      ParseBoolFlag(arg, kThrowOnFailureFlag,
+                    &GTEST_FLAG(throw_on_failure));
+}
+
+#if GTEST_USE_OWN_FLAGFILE_FLAG_
+void LoadFlagsFromFile(const std::string& path) {
+  FILE* flagfile = posix::FOpen(path.c_str(), "r");
+  if (!flagfile) {
+    fprintf(stderr,
+            "Unable to open file \"%s\"\n",
+            GTEST_FLAG(flagfile).c_str());
+    fflush(stderr);
+    exit(EXIT_FAILURE);
+  }
+  std::string contents(ReadEntireFile(flagfile));
+  posix::FClose(flagfile);
+  std::vector<std::string> lines;
+  SplitString(contents, '\n', &lines);
+  for (size_t i = 0; i < lines.size(); ++i) {
+    if (lines[i].empty())
+      continue;
+    if (!ParseGoogleTestFlag(lines[i].c_str()))
+      g_help_flag = true;
+  }
+}
+#endif  // GTEST_USE_OWN_FLAGFILE_FLAG_
+
+// Parses the command line for Google Test flags, without initializing
+// other parts of Google Test.  The type parameter CharType can be
+// instantiated to either char or wchar_t.
+template <typename CharType>
+void ParseGoogleTestFlagsOnlyImpl(int* argc, CharType** argv) {
+  for (int i = 1; i < *argc; i++) {
+    const std::string arg_string = StreamableToString(argv[i]);
+    const char* const arg = arg_string.c_str();
+
+    using internal::ParseBoolFlag;
+    using internal::ParseInt32Flag;
+    using internal::ParseStringFlag;
+
+    bool remove_flag = false;
+    if (ParseGoogleTestFlag(arg)) {
+      remove_flag = true;
+#if GTEST_USE_OWN_FLAGFILE_FLAG_
+    } else if (ParseStringFlag(arg, kFlagfileFlag, &GTEST_FLAG(flagfile))) {
+      LoadFlagsFromFile(GTEST_FLAG(flagfile));
+      remove_flag = true;
+#endif  // GTEST_USE_OWN_FLAGFILE_FLAG_
+    } else if (arg_string == "--help" || arg_string == "-h" ||
+               arg_string == "-?" || arg_string == "/?" ||
+               HasGoogleTestFlagPrefix(arg)) {
+      // Both help flag and unrecognized Google Test flags (excluding
+      // internal ones) trigger help display.
+      g_help_flag = true;
+    }
+
+    if (remove_flag) {
+      // Shift the remainder of the argv list left by one.  Note
+      // that argv has (*argc + 1) elements, the last one always being
+      // NULL.  The following loop moves the trailing NULL element as
+      // well.
+      for (int j = i; j != *argc; j++) {
+        argv[j] = argv[j + 1];
+      }
+
+      // Decrements the argument count.
+      (*argc)--;
+
+      // We also need to decrement the iterator as we just removed
+      // an element.
+      i--;
+    }
+  }
+
+  if (g_help_flag) {
+    // We print the help here instead of in RUN_ALL_TESTS(), as the
+    // latter may not be called at all if the user is using Google
+    // Test with another testing framework.
+    PrintColorEncoded(kColorEncodedHelpMessage);
+  }
+}
+
+// Parses the command line for Google Test flags, without initializing
+// other parts of Google Test.
+void ParseGoogleTestFlagsOnly(int* argc, char** argv) {
+  ParseGoogleTestFlagsOnlyImpl(argc, argv);
+}
+void ParseGoogleTestFlagsOnly(int* argc, wchar_t** argv) {
+  ParseGoogleTestFlagsOnlyImpl(argc, argv);
+}
+
+// The internal implementation of InitGoogleTest().
+//
+// The type parameter CharType can be instantiated to either char or
+// wchar_t.
+template <typename CharType>
+void InitGoogleTestImpl(int* argc, CharType** argv) {
+  // We don't want to run the initialization code twice.
+  if (GTestIsInitialized()) return;
+
+  if (*argc <= 0) return;
+
+  g_argvs.clear();
+  for (int i = 0; i != *argc; i++) {
+    g_argvs.push_back(StreamableToString(argv[i]));
+  }
+
+  ParseGoogleTestFlagsOnly(argc, argv);
+  GetUnitTestImpl()->PostFlagParsingInit();
+}
+
+}  // namespace internal
+
+// Initializes Google Test.  This must be called before calling
+// RUN_ALL_TESTS().  In particular, it parses a command line for the
+// flags that Google Test recognizes.  Whenever a Google Test flag is
+// seen, it is removed from argv, and *argc is decremented.
+//
+// No value is returned.  Instead, the Google Test flag variables are
+// updated.
+//
+// Calling the function for the second time has no user-visible effect.
+void InitGoogleTest(int* argc, char** argv) {
+#if defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_)
+  GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_(argc, argv);
+#else  // defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_)
+  internal::InitGoogleTestImpl(argc, argv);
+#endif  // defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_)
+}
+
+// This overloaded version can be used in Windows programs compiled in
+// UNICODE mode.
+void InitGoogleTest(int* argc, wchar_t** argv) {
+#if defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_)
+  GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_(argc, argv);
+#else  // defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_)
+  internal::InitGoogleTestImpl(argc, argv);
+#endif  // defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_)
+}
+
+}  // namespace testing
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan), vladl@google.com (Vlad Losev)
+//
+// This file implements death tests.
+
+
+#if GTEST_HAS_DEATH_TEST
+
+# if GTEST_OS_MAC
+#  include <crt_externs.h>
+# endif  // GTEST_OS_MAC
+
+# include <errno.h>
+# include <fcntl.h>
+# include <limits.h>
+
+# if GTEST_OS_LINUX
+#  include <signal.h>
+# endif  // GTEST_OS_LINUX
+
+# include <stdarg.h>
+
+# if GTEST_OS_WINDOWS
+#  include <windows.h>
+# else
+#  include <sys/mman.h>
+#  include <sys/wait.h>
+# endif  // GTEST_OS_WINDOWS
+
+# if GTEST_OS_QNX
+#  include <spawn.h>
+# endif  // GTEST_OS_QNX
+
+#endif  // GTEST_HAS_DEATH_TEST
+
+
+// Indicates that this translation unit is part of Google Test's
+// implementation.  It must come before gtest-internal-inl.h is
+// included, or there will be a compiler error.  This trick exists to
+// prevent the accidental inclusion of gtest-internal-inl.h in the
+// user's code.
+#define GTEST_IMPLEMENTATION_ 1
+#undef GTEST_IMPLEMENTATION_
+
+namespace testing {
+
+// Constants.
+
+// The default death test style.
+static const char kDefaultDeathTestStyle[] = "fast";
+
+GTEST_DEFINE_string_(
+    death_test_style,
+    internal::StringFromGTestEnv("death_test_style", kDefaultDeathTestStyle),
+    "Indicates how to run a death test in a forked child process: "
+    "\"threadsafe\" (child process re-executes the test binary "
+    "from the beginning, running only the specific death test) or "
+    "\"fast\" (child process runs the death test immediately "
+    "after forking).");
+
+GTEST_DEFINE_bool_(
+    death_test_use_fork,
+    internal::BoolFromGTestEnv("death_test_use_fork", false),
+    "Instructs to use fork()/_exit() instead of clone() in death tests. "
+    "Ignored and always uses fork() on POSIX systems where clone() is not "
+    "implemented. Useful when running under valgrind or similar tools if "
+    "those do not support clone(). Valgrind 3.3.1 will just fail if "
+    "it sees an unsupported combination of clone() flags. "
+    "It is not recommended to use this flag w/o valgrind though it will "
+    "work in 99% of the cases. Once valgrind is fixed, this flag will "
+    "most likely be removed.");
+
+namespace internal {
+GTEST_DEFINE_string_(
+    internal_run_death_test, "",
+    "Indicates the file, line number, temporal index of "
+    "the single death test to run, and a file descriptor to "
+    "which a success code may be sent, all separated by "
+    "the '|' characters.  This flag is specified if and only if the current "
+    "process is a sub-process launched for running a thread-safe "
+    "death test.  FOR INTERNAL USE ONLY.");
+}  // namespace internal
+
+#if GTEST_HAS_DEATH_TEST
+
+namespace internal {
+
+// Valid only for fast death tests. Indicates the code is running in the
+// child process of a fast style death test.
+# if !GTEST_OS_WINDOWS
+static bool g_in_fast_death_test_child = false;
+# endif
+
+// Returns a Boolean value indicating whether the caller is currently
+// executing in the context of the death test child process.  Tools such as
+// Valgrind heap checkers may need this to modify their behavior in death
+// tests.  IMPORTANT: This is an internal utility.  Using it may break the
+// implementation of death tests.  User code MUST NOT use it.
+bool InDeathTestChild() {
+# if GTEST_OS_WINDOWS
+
+  // On Windows, death tests are thread-safe regardless of the value of the
+  // death_test_style flag.
+  return !GTEST_FLAG(internal_run_death_test).empty();
+
+# else
+
+  if (GTEST_FLAG(death_test_style) == "threadsafe")
+    return !GTEST_FLAG(internal_run_death_test).empty();
+  else
+    return g_in_fast_death_test_child;
+# endif  // GTEST_OS_WINDOWS
+}
+
+}  // namespace internal
+
+// ExitedWithCode constructor.
+ExitedWithCode::ExitedWithCode(int exit_code) : exit_code_(exit_code) {
+}
+
+// ExitedWithCode function-call operator.
+bool ExitedWithCode::operator()(int exit_status) const {
+# if GTEST_OS_WINDOWS
+
+  return exit_status == exit_code_;
+
+# else
+
+  return WIFEXITED(exit_status) && WEXITSTATUS(exit_status) == exit_code_;
+
+# endif  // GTEST_OS_WINDOWS
+}
+
+# if !GTEST_OS_WINDOWS
+// KilledBySignal constructor.
+KilledBySignal::KilledBySignal(int signum) : signum_(signum) {
+}
+
+// KilledBySignal function-call operator.
+bool KilledBySignal::operator()(int exit_status) const {
+#  if defined(GTEST_KILLED_BY_SIGNAL_OVERRIDE_)
+  {
+    bool result;
+    if (GTEST_KILLED_BY_SIGNAL_OVERRIDE_(signum_, exit_status, &result)) {
+      return result;
+    }
+  }
+#  endif  // defined(GTEST_KILLED_BY_SIGNAL_OVERRIDE_)
+  return WIFSIGNALED(exit_status) && WTERMSIG(exit_status) == signum_;
+}
+# endif  // !GTEST_OS_WINDOWS
+
+namespace internal {
+
+// Utilities needed for death tests.
+
+// Generates a textual description of a given exit code, in the format
+// specified by wait(2).
+static std::string ExitSummary(int exit_code) {
+  Message m;
+
+# if GTEST_OS_WINDOWS
+
+  m << "Exited with exit status " << exit_code;
+
+# else
+
+  if (WIFEXITED(exit_code)) {
+    m << "Exited with exit status " << WEXITSTATUS(exit_code);
+  } else if (WIFSIGNALED(exit_code)) {
+    m << "Terminated by signal " << WTERMSIG(exit_code);
+  }
+#  ifdef WCOREDUMP
+  if (WCOREDUMP(exit_code)) {
+    m << " (core dumped)";
+  }
+#  endif
+# endif  // GTEST_OS_WINDOWS
+
+  return m.GetString();
+}
+
+// Returns true if exit_status describes a process that was terminated
+// by a signal, or exited normally with a nonzero exit code.
+bool ExitedUnsuccessfully(int exit_status) {
+  return !ExitedWithCode(0)(exit_status);
+}
+
+# if !GTEST_OS_WINDOWS
+// Generates a textual failure message when a death test finds more than
+// one thread running, or cannot determine the number of threads, prior
+// to executing the given statement.  It is the responsibility of the
+// caller not to pass a thread_count of 1.
+static std::string DeathTestThreadWarning(size_t thread_count) {
+  Message msg;
+  msg << "Death tests use fork(), which is unsafe particularly"
+      << " in a threaded context. For this test, " << GTEST_NAME_ << " ";
+  if (thread_count == 0)
+    msg << "couldn't detect the number of threads.";
+  else
+    msg << "detected " << thread_count << " threads.";
+  return msg.GetString();
+}
+# endif  // !GTEST_OS_WINDOWS
+
+// Flag characters for reporting a death test that did not die.
+static const char kDeathTestLived = 'L';
+static const char kDeathTestReturned = 'R';
+static const char kDeathTestThrew = 'T';
+static const char kDeathTestInternalError = 'I';
+
+// An enumeration describing all of the possible ways that a death test can
+// conclude.  DIED means that the process died while executing the test
+// code; LIVED means that the process lived beyond the end of the test code;
+// RETURNED means that the test statement attempted to execute a return
+// statement, which is not allowed; THREW means that the test statement
+// returned control by throwing an exception.  IN_PROGRESS means the test
+// has not yet concluded.
+// TODO(vladl@google.com): Unify names and possibly values for
+// AbortReason, DeathTestOutcome, and flag characters above.
+enum DeathTestOutcome { IN_PROGRESS, DIED, LIVED, RETURNED, THREW };
+
+// Routine for aborting the program which is safe to call from an
+// exec-style death test child process, in which case the error
+// message is propagated back to the parent process.  Otherwise, the
+// message is simply printed to stderr.  In either case, the program
+// then exits with status 1.
+void DeathTestAbort(const std::string& message) {
+  // On a POSIX system, this function may be called from a threadsafe-style
+  // death test child process, which operates on a very small stack.  Use
+  // the heap for any additional non-minuscule memory requirements.
+  const InternalRunDeathTestFlag* const flag =
+      GetUnitTestImpl()->internal_run_death_test_flag();
+  if (flag != NULL) {
+    FILE* parent = posix::FDOpen(flag->write_fd(), "w");
+    fputc(kDeathTestInternalError, parent);
+    fprintf(parent, "%s", message.c_str());
+    fflush(parent);
+    _exit(1);
+  } else {
+    fprintf(stderr, "%s", message.c_str());
+    fflush(stderr);
+    posix::Abort();
+  }
+}
+
+// A replacement for CHECK that calls DeathTestAbort if the assertion
+// fails.
+# define GTEST_DEATH_TEST_CHECK_(expression) \
+  do { \
+    if (!::testing::internal::IsTrue(expression)) { \
+      DeathTestAbort( \
+          ::std::string("CHECK failed: File ") + __FILE__ +  ", line " \
+          + ::testing::internal::StreamableToString(__LINE__) + ": " \
+          + #expression); \
+    } \
+  } while (::testing::internal::AlwaysFalse())
+
+// This macro is similar to GTEST_DEATH_TEST_CHECK_, but it is meant for
+// evaluating any system call that fulfills two conditions: it must return
+// -1 on failure, and set errno to EINTR when it is interrupted and
+// should be tried again.  The macro expands to a loop that repeatedly
+// evaluates the expression as long as it evaluates to -1 and sets
+// errno to EINTR.  If the expression evaluates to -1 but errno is
+// something other than EINTR, DeathTestAbort is called.
+# define GTEST_DEATH_TEST_CHECK_SYSCALL_(expression) \
+  do { \
+    int gtest_retval; \
+    do { \
+      gtest_retval = (expression); \
+    } while (gtest_retval == -1 && errno == EINTR); \
+    if (gtest_retval == -1) { \
+      DeathTestAbort( \
+          ::std::string("CHECK failed: File ") + __FILE__ + ", line " \
+          + ::testing::internal::StreamableToString(__LINE__) + ": " \
+          + #expression + " != -1"); \
+    } \
+  } while (::testing::internal::AlwaysFalse())
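+
+// Typical use, mirroring the call sites further down in this file:
+//
+//   GTEST_DEATH_TEST_CHECK_SYSCALL_(posix::Close(read_fd()));
+//
+// The call is retried while it fails with EINTR; any other failure aborts
+// the death test with a CHECK-style diagnostic.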
+
+// Returns the message describing the last system error in errno.
+std::string GetLastErrnoDescription() {
+  return errno == 0 ? "" : posix::StrError(errno);
+}
+
+// This is called from a death test parent process to read a failure
+// message from the death test child process and log it with the FATAL
+// severity. On Windows, the message is read from a pipe handle. On other
+// platforms, it is read from a file descriptor.
+static void FailFromInternalError(int fd) {
+  Message error;
+  char buffer[256];
+  int num_read;
+
+  do {
+    while ((num_read = posix::Read(fd, buffer, 255)) > 0) {
+      buffer[num_read] = '\0';
+      error << buffer;
+    }
+  } while (num_read == -1 && errno == EINTR);
+
+  if (num_read == 0) {
+    GTEST_LOG_(FATAL) << error.GetString();
+  } else {
+    const int last_error = errno;
+    GTEST_LOG_(FATAL) << "Error while reading death test internal: "
+                      << GetLastErrnoDescription() << " [" << last_error << "]";
+  }
+}
+
+// Death test constructor.  Increments the running death test count
+// for the current test.
+DeathTest::DeathTest() {
+  TestInfo* const info = GetUnitTestImpl()->current_test_info();
+  if (info == NULL) {
+    DeathTestAbort("Cannot run a death test outside of a TEST or "
+                   "TEST_F construct");
+  }
+}
+
+// Creates and returns a death test by dispatching to the current
+// death test factory.
+bool DeathTest::Create(const char* statement, const RE* regex,
+                       const char* file, int line, DeathTest** test) {
+  return GetUnitTestImpl()->death_test_factory()->Create(
+      statement, regex, file, line, test);
+}
+
+const char* DeathTest::LastMessage() {
+  return last_death_test_message_.c_str();
+}
+
+void DeathTest::set_last_death_test_message(const std::string& message) {
+  last_death_test_message_ = message;
+}
+
+std::string DeathTest::last_death_test_message_;
+
+// Provides cross platform implementation for some death functionality.
+class DeathTestImpl : public DeathTest {
+ protected:
+  DeathTestImpl(const char* a_statement, const RE* a_regex)
+      : statement_(a_statement),
+        regex_(a_regex),
+        spawned_(false),
+        status_(-1),
+        outcome_(IN_PROGRESS),
+        read_fd_(-1),
+        write_fd_(-1) {}
+
+  // read_fd_ is expected to be closed and cleared by a derived class.
+  ~DeathTestImpl() { GTEST_DEATH_TEST_CHECK_(read_fd_ == -1); }
+
+  void Abort(AbortReason reason);
+  virtual bool Passed(bool status_ok);
+
+  const char* statement() const { return statement_; }
+  const RE* regex() const { return regex_; }
+  bool spawned() const { return spawned_; }
+  void set_spawned(bool is_spawned) { spawned_ = is_spawned; }
+  int status() const { return status_; }
+  void set_status(int a_status) { status_ = a_status; }
+  DeathTestOutcome outcome() const { return outcome_; }
+  void set_outcome(DeathTestOutcome an_outcome) { outcome_ = an_outcome; }
+  int read_fd() const { return read_fd_; }
+  void set_read_fd(int fd) { read_fd_ = fd; }
+  int write_fd() const { return write_fd_; }
+  void set_write_fd(int fd) { write_fd_ = fd; }
+
+  // Called in the parent process only. Reads the result code of the death
+  // test child process via a pipe, interprets it to set the outcome_
+  // member, and closes read_fd_.  Outputs diagnostics and terminates in
+  // case of unexpected codes.
+  void ReadAndInterpretStatusByte();
+
+ private:
+  // The textual content of the code this object is testing.  This class
+  // doesn't own this string and should not attempt to delete it.
+  const char* const statement_;
+  // The regular expression which test output must match.  DeathTestImpl
+  // doesn't own this object and should not attempt to delete it.
+  const RE* const regex_;
+  // True if the death test child process has been successfully spawned.
+  bool spawned_;
+  // The exit status of the child process.
+  int status_;
+  // How the death test concluded.
+  DeathTestOutcome outcome_;
+  // Descriptor to the read end of the pipe to the child process.  It is
+  // always -1 in the child process.  The child keeps its write end of the
+  // pipe in write_fd_.
+  int read_fd_;
+  // Descriptor to the child's write end of the pipe to the parent process.
+  // It is always -1 in the parent process.  The parent keeps its end of the
+  // pipe in read_fd_.
+  int write_fd_;
+};
+
+// Called in the parent process only. Reads the result code of the death
+// test child process via a pipe, interprets it to set the outcome_
+// member, and closes read_fd_.  Outputs diagnostics and terminates in
+// case of unexpected codes.
+void DeathTestImpl::ReadAndInterpretStatusByte() {
+  char flag;
+  int bytes_read;
+
+  // The read() here blocks until data is available (signifying the
+  // failure of the death test) or until the pipe is closed (signifying
+  // its success), so it's okay to call this in the parent before
+  // the child process has exited.
+  do {
+    bytes_read = posix::Read(read_fd(), &flag, 1);
+  } while (bytes_read == -1 && errno == EINTR);
+
+  if (bytes_read == 0) {
+    set_outcome(DIED);
+  } else if (bytes_read == 1) {
+    switch (flag) {
+      case kDeathTestReturned:
+        set_outcome(RETURNED);
+        break;
+      case kDeathTestThrew:
+        set_outcome(THREW);
+        break;
+      case kDeathTestLived:
+        set_outcome(LIVED);
+        break;
+      case kDeathTestInternalError:
+        FailFromInternalError(read_fd());  // Does not return.
+        break;
+      default:
+        GTEST_LOG_(FATAL) << "Death test child process reported "
+                          << "unexpected status byte ("
+                          << static_cast<unsigned int>(flag) << ")";
+    }
+  } else {
+    GTEST_LOG_(FATAL) << "Read from death test child process failed: "
+                      << GetLastErrnoDescription();
+  }
+  GTEST_DEATH_TEST_CHECK_SYSCALL_(posix::Close(read_fd()));
+  set_read_fd(-1);
+}
+
+// Signals that the death test code which should have exited, didn't.
+// Should be called only in a death test child process.
+// Writes a status byte to the child's status file descriptor, then
+// calls _exit(1).
+void DeathTestImpl::Abort(AbortReason reason) {
+  // The parent process considers the death test to be a failure if
+  // it finds any data in our pipe.  So, here we write a single flag byte
+  // to the pipe, then exit.
+  const char status_ch =
+      reason == TEST_DID_NOT_DIE ? kDeathTestLived :
+      reason == TEST_THREW_EXCEPTION ? kDeathTestThrew : kDeathTestReturned;
+
+  GTEST_DEATH_TEST_CHECK_SYSCALL_(posix::Write(write_fd(), &status_ch, 1));
+  // We are leaking the descriptor here because on some platforms (e.g.,
+  // when built as a Windows DLL), destructors of global objects will still
+  // run after calling _exit(). On such systems, write_fd_ will be
+  // indirectly closed from the destructor of UnitTestImpl, causing double
+  // close if it is also closed here. On debug configurations, double close
+  // may assert. As there are no in-process buffers to flush here, we are
+  // relying on the OS to close the descriptor after the process terminates
+  // when the destructors are not run.
+  _exit(1);  // Exits w/o any normal exit hooks (we were supposed to crash)
+}
+
+// Returns an indented copy of stderr output for a death test.
+// This makes distinguishing death test output lines from regular log lines
+// much easier.
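+// For example (illustrative), an output of "foo\nbar" becomes
+// "[  DEATH   ] foo\n[  DEATH   ] bar".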
+static ::std::string FormatDeathTestOutput(const ::std::string& output) {
+  ::std::string ret;
+  for (size_t at = 0; ; ) {
+    const size_t line_end = output.find('\n', at);
+    ret += "[  DEATH   ] ";
+    if (line_end == ::std::string::npos) {
+      ret += output.substr(at);
+      break;
+    }
+    ret += output.substr(at, line_end + 1 - at);
+    at = line_end + 1;
+  }
+  return ret;
+}
+
+// Assesses the success or failure of a death test, using both private
+// members which have previously been set, and one argument:
+//
+// Private data members:
+//   outcome:  An enumeration describing how the death test
+//             concluded: DIED, LIVED, THREW, or RETURNED.  The death test
+//             fails in the latter three cases.
+//   status:   The exit status of the child process. On *nix, it is in the
+//             format specified by wait(2). On Windows, this is the
+//             value supplied to the ExitProcess() API or a numeric code
+//             of the exception that terminated the program.
+//   regex:    A regular expression object to be applied to
+//             the test's captured standard error output; the death test
+//             fails if it does not match.
+//
+// Argument:
+//   status_ok: true if the exit status is acceptable in the context of
+//              this particular death test, which fails if it is false
+//
+// Returns true iff all of the above conditions are met.  Otherwise, the
+// first failing condition, in the order given above, is the one that is
+// reported. Also sets the last death test message string.
+bool DeathTestImpl::Passed(bool status_ok) {
+  if (!spawned())
+    return false;
+
+  const std::string error_message = GetCapturedStderr();
+
+  bool success = false;
+  Message buffer;
+
+  buffer << "Death test: " << statement() << "\n";
+  switch (outcome()) {
+    case LIVED:
+      buffer << "    Result: failed to die.\n"
+             << " Error msg:\n" << FormatDeathTestOutput(error_message);
+      break;
+    case THREW:
+      buffer << "    Result: threw an exception.\n"
+             << " Error msg:\n" << FormatDeathTestOutput(error_message);
+      break;
+    case RETURNED:
+      buffer << "    Result: illegal return in test statement.\n"
+             << " Error msg:\n" << FormatDeathTestOutput(error_message);
+      break;
+    case DIED:
+      if (status_ok) {
+        const bool matched = RE::PartialMatch(error_message.c_str(), *regex());
+        if (matched) {
+          success = true;
+        } else {
+          buffer << "    Result: died but not with expected error.\n"
+                 << "  Expected: " << regex()->pattern() << "\n"
+                 << "Actual msg:\n" << FormatDeathTestOutput(error_message);
+        }
+      } else {
+        buffer << "    Result: died but not with expected exit code:\n"
+               << "            " << ExitSummary(status()) << "\n"
+               << "Actual msg:\n" << FormatDeathTestOutput(error_message);
+      }
+      break;
+    case IN_PROGRESS:
+    default:
+      GTEST_LOG_(FATAL)
+          << "DeathTest::Passed somehow called before conclusion of test";
+  }
+
+  DeathTest::set_last_death_test_message(buffer.GetString());
+  return success;
+}
+
+# if GTEST_OS_WINDOWS
+// WindowsDeathTest implements death tests on Windows. Due to the
+// specifics of starting new processes on Windows, death tests there are
+// always threadsafe, and Google Test considers the
+// --gtest_death_test_style=fast setting to be equivalent to
+// --gtest_death_test_style=threadsafe there.
+//
+// A few implementation notes:  Like the Linux version, the Windows
+// implementation uses pipes for child-to-parent communication. But due to
+// the specifics of pipes on Windows, some extra steps are required:
+//
+// 1. The parent creates a communication pipe and stores handles to both
+//    ends of it.
+// 2. The parent starts the child and provides it with the information
+//    necessary to acquire the handle to the write end of the pipe.
+// 3. The child acquires the write end of the pipe and signals the parent
+//    using a Windows event.
+// 4. Now the parent can release the write end of the pipe on its side. If
+//    this is done before step 3, the object's reference count goes down to
+//    0 and it is destroyed, preventing the child from acquiring it. The
+//    parent now has to release it, or read operations on the read end of
+//    the pipe will not return when the child terminates.
+// 5. The parent reads the child's output (the outcome code and any possible
+//    error messages) from the pipe and the child's stderr, and then
+//    determines whether to fail the test.
+//
+// Note: to distinguish Win32 API calls from the local method and function
+// calls, the former are explicitly resolved in the global namespace.
+//
+class WindowsDeathTest : public DeathTestImpl {
+ public:
+  WindowsDeathTest(const char* a_statement,
+                   const RE* a_regex,
+                   const char* file,
+                   int line)
+      : DeathTestImpl(a_statement, a_regex), file_(file), line_(line) {}
+
+  // All of these virtual functions are inherited from DeathTest.
+  virtual int Wait();
+  virtual TestRole AssumeRole();
+
+ private:
+  // The name of the file in which the death test is located.
+  const char* const file_;
+  // The line number on which the death test is located.
+  const int line_;
+  // Handle to the write end of the pipe to the child process.
+  AutoHandle write_handle_;
+  // Child process handle.
+  AutoHandle child_handle_;
+  // Event the child process uses to signal the parent that it has
+  // acquired the handle to the write end of the pipe. After seeing this
+  // event the parent can release its own handles to make sure its
+  // ReadFile() calls return when the child terminates.
+  AutoHandle event_handle_;
+};
+
+// Waits for the child in a death test to exit, returning its exit
+// status, or 0 if no child process exists.  As a side effect, sets the
+// outcome data member.
+int WindowsDeathTest::Wait() {
+  if (!spawned())
+    return 0;
+
+  // Wait until the child either signals that it has acquired the write end
+  // of the pipe or it dies.
+  const HANDLE wait_handles[2] = { child_handle_.Get(), event_handle_.Get() };
+  switch (::WaitForMultipleObjects(2,
+                                   wait_handles,
+                                   FALSE,  // Waits for any of the handles.
+                                   INFINITE)) {
+    case WAIT_OBJECT_0:
+    case WAIT_OBJECT_0 + 1:
+      break;
+    default:
+      GTEST_DEATH_TEST_CHECK_(false);  // Should not get here.
+  }
+
+  // The child has acquired the write end of the pipe or exited.
+  // We release the handle on our side and continue.
+  write_handle_.Reset();
+  event_handle_.Reset();
+
+  ReadAndInterpretStatusByte();
+
+  // Waits for the child process to exit if it hasn't already. This
+  // returns immediately if the child has already exited, regardless of
+  // whether previous calls to WaitForMultipleObjects synchronized on this
+  // handle or not.
+  GTEST_DEATH_TEST_CHECK_(
+      WAIT_OBJECT_0 == ::WaitForSingleObject(child_handle_.Get(),
+                                             INFINITE));
+  DWORD status_code;
+  GTEST_DEATH_TEST_CHECK_(
+      ::GetExitCodeProcess(child_handle_.Get(), &status_code) != FALSE);
+  child_handle_.Reset();
+  set_status(static_cast<int>(status_code));
+  return status();
+}
+
+// The AssumeRole process for a Windows death test.  It creates a child
+// process with the same executable as the current process to run the
+// death test.  The child process is given the --gtest_filter and
+// --gtest_internal_run_death_test flags such that it knows to run the
+// current death test only.
+DeathTest::TestRole WindowsDeathTest::AssumeRole() {
+  const UnitTestImpl* const impl = GetUnitTestImpl();
+  const InternalRunDeathTestFlag* const flag =
+      impl->internal_run_death_test_flag();
+  const TestInfo* const info = impl->current_test_info();
+  const int death_test_index = info->result()->death_test_count();
+
+  if (flag != NULL) {
+    // ParseInternalRunDeathTestFlag() has performed all the necessary
+    // processing.
+    set_write_fd(flag->write_fd());
+    return EXECUTE_TEST;
+  }
+
+  // WindowsDeathTest uses an anonymous pipe to communicate results of
+  // a death test.
+  SECURITY_ATTRIBUTES handles_are_inheritable = {
+    sizeof(SECURITY_ATTRIBUTES), NULL, TRUE };
+  HANDLE read_handle, write_handle;
+  GTEST_DEATH_TEST_CHECK_(
+      ::CreatePipe(&read_handle, &write_handle, &handles_are_inheritable,
+                   0)  // Default buffer size.
+      != FALSE);
+  set_read_fd(::_open_osfhandle(reinterpret_cast<intptr_t>(read_handle),
+                                O_RDONLY));
+  write_handle_.Reset(write_handle);
+  event_handle_.Reset(::CreateEvent(
+      &handles_are_inheritable,
+      TRUE,    // The event is manual-reset: it stays signaled until reset.
+      FALSE,   // The initial state is non-signaled.
+      NULL));  // The event is unnamed.
+  GTEST_DEATH_TEST_CHECK_(event_handle_.Get() != NULL);
+  const std::string filter_flag =
+      std::string("--") + GTEST_FLAG_PREFIX_ + kFilterFlag + "=" +
+      info->test_case_name() + "." + info->name();
+  const std::string internal_flag =
+      std::string("--") + GTEST_FLAG_PREFIX_ + kInternalRunDeathTestFlag +
+      "=" + file_ + "|" + StreamableToString(line_) + "|" +
+      StreamableToString(death_test_index) + "|" +
+      StreamableToString(static_cast<unsigned int>(::GetCurrentProcessId())) +
+      // size_t has the same width as pointers on both 32-bit and 64-bit
+      // Windows platforms.
+      // See http://msdn.microsoft.com/en-us/library/tcxf1dw6.aspx.
+      "|" + StreamableToString(reinterpret_cast<size_t>(write_handle)) +
+      "|" + StreamableToString(reinterpret_cast<size_t>(event_handle_.Get()));
+
+  char executable_path[_MAX_PATH + 1];  // NOLINT
+  GTEST_DEATH_TEST_CHECK_(
+      _MAX_PATH + 1 != ::GetModuleFileNameA(NULL,
+                                            executable_path,
+                                            _MAX_PATH));
+
+  std::string command_line =
+      std::string(::GetCommandLineA()) + " " + filter_flag + " \"" +
+      internal_flag + "\"";
+
+  DeathTest::set_last_death_test_message("");
+
+  CaptureStderr();
+  // Flush the log buffers since the log streams are shared with the child.
+  FlushInfoLog();
+
+  // The child process will share the standard handles with the parent.
+  STARTUPINFOA startup_info;
+  memset(&startup_info, 0, sizeof(STARTUPINFO));
+  startup_info.dwFlags = STARTF_USESTDHANDLES;
+  startup_info.hStdInput = ::GetStdHandle(STD_INPUT_HANDLE);
+  startup_info.hStdOutput = ::GetStdHandle(STD_OUTPUT_HANDLE);
+  startup_info.hStdError = ::GetStdHandle(STD_ERROR_HANDLE);
+
+  PROCESS_INFORMATION process_info;
+  GTEST_DEATH_TEST_CHECK_(::CreateProcessA(
+      executable_path,
+      const_cast<char*>(command_line.c_str()),
+      NULL,   // Returned process handle is not inheritable.
+      NULL,   // Returned thread handle is not inheritable.
+      TRUE,   // Child inherits all inheritable handles (for write_handle_).
+      0x0,    // Default creation flags.
+      NULL,   // Inherit the parent's environment.
+      UnitTest::GetInstance()->original_working_dir(),
+      &startup_info,
+      &process_info) != FALSE);
+  child_handle_.Reset(process_info.hProcess);
+  ::CloseHandle(process_info.hThread);
+  set_spawned(true);
+  return OVERSEE_TEST;
+}
+# else  // We are not on Windows.
+
+// ForkingDeathTest provides implementations for most of the abstract
+// methods of the DeathTest interface.  Only the AssumeRole method is
+// left undefined.
+class ForkingDeathTest : public DeathTestImpl {
+ public:
+  ForkingDeathTest(const char* statement, const RE* regex);
+
+  // All of these virtual functions are inherited from DeathTest.
+  virtual int Wait();
+
+ protected:
+  void set_child_pid(pid_t child_pid) { child_pid_ = child_pid; }
+
+ private:
+  // PID of child process during death test; 0 in the child process itself.
+  pid_t child_pid_;
+};
+
+// Constructs a ForkingDeathTest.
+ForkingDeathTest::ForkingDeathTest(const char* a_statement, const RE* a_regex)
+    : DeathTestImpl(a_statement, a_regex),
+      child_pid_(-1) {}
+
+// Waits for the child in a death test to exit, returning its exit
+// status, or 0 if no child process exists.  As a side effect, sets the
+// outcome data member.
+int ForkingDeathTest::Wait() {
+  if (!spawned())
+    return 0;
+
+  ReadAndInterpretStatusByte();
+
+  int status_value;
+  GTEST_DEATH_TEST_CHECK_SYSCALL_(waitpid(child_pid_, &status_value, 0));
+  set_status(status_value);
+  return status_value;
+}
+
+// A concrete death test class that forks, then immediately runs the test
+// in the child process.
+class NoExecDeathTest : public ForkingDeathTest {
+ public:
+  NoExecDeathTest(const char* a_statement, const RE* a_regex) :
+      ForkingDeathTest(a_statement, a_regex) { }
+  virtual TestRole AssumeRole();
+};
+
+// The AssumeRole process for a fork-and-run death test.  It implements a
+// straightforward fork, with a simple pipe to transmit the status byte.
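+// (As the code below shows, the parent keeps the read end, pipe_fd[0], and
+// the child keeps the write end, pipe_fd[1], through which it reports its
+// outcome via DeathTestImpl::Abort.)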
+DeathTest::TestRole NoExecDeathTest::AssumeRole() {
+  const size_t thread_count = GetThreadCount();
+  if (thread_count != 1) {
+    GTEST_LOG_(WARNING) << DeathTestThreadWarning(thread_count);
+  }
+
+  int pipe_fd[2];
+  GTEST_DEATH_TEST_CHECK_(pipe(pipe_fd) != -1);
+
+  DeathTest::set_last_death_test_message("");
+  CaptureStderr();
+  // When we fork the process below, the log file buffers are copied, but the
+  // file descriptors are shared.  We flush all log files here so that closing
+  // the file descriptors in the child process doesn't throw off the
+  // synchronization between descriptors and buffers in the parent process.
+  // This is as close to the fork as possible to avoid a race condition in case
+  // there are multiple threads running before the death test, and another
+  // thread writes to the log file.
+  FlushInfoLog();
+
+  const pid_t child_pid = fork();
+  GTEST_DEATH_TEST_CHECK_(child_pid != -1);
+  set_child_pid(child_pid);
+  if (child_pid == 0) {
+    GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[0]));
+    set_write_fd(pipe_fd[1]);
+    // Redirects all logging to stderr in the child process to prevent
+    // concurrent writes to the log files.  We capture stderr in the parent
+    // process and append the child process' output to a log.
+    LogToStderr();
+    // Event forwarding to the listeners of the event listener API must be
+    // shut down in death test subprocesses.
+    GetUnitTestImpl()->listeners()->SuppressEventForwarding();
+    g_in_fast_death_test_child = true;
+    return EXECUTE_TEST;
+  } else {
+    GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[1]));
+    set_read_fd(pipe_fd[0]);
+    set_spawned(true);
+    return OVERSEE_TEST;
+  }
+}
+
+// A concrete death test class that forks and re-executes the main
+// program from the beginning, with command-line flags set that cause
+// only this specific death test to be run.
+class ExecDeathTest : public ForkingDeathTest {
+ public:
+  ExecDeathTest(const char* a_statement, const RE* a_regex,
+                const char* file, int line) :
+      ForkingDeathTest(a_statement, a_regex), file_(file), line_(line) { }
+  virtual TestRole AssumeRole();
+ private:
+  static ::std::vector<testing::internal::string>
+  GetArgvsForDeathTestChildProcess() {
+    ::std::vector<testing::internal::string> args = GetInjectableArgvs();
+#  if defined(GTEST_EXTRA_DEATH_TEST_COMMAND_LINE_ARGS_)
+    ::std::vector<testing::internal::string> extra_args =
+        GTEST_EXTRA_DEATH_TEST_COMMAND_LINE_ARGS_();
+    args.insert(args.end(), extra_args.begin(), extra_args.end());
+#  endif  // defined(GTEST_EXTRA_DEATH_TEST_COMMAND_LINE_ARGS_)
+    return args;
+  }
+  // The name of the file in which the death test is located.
+  const char* const file_;
+  // The line number on which the death test is located.
+  const int line_;
+};
+
+// Utility class for accumulating command-line arguments.
+class Arguments {
+ public:
+  Arguments() {
+    args_.push_back(NULL);
+  }
+
+  ~Arguments() {
+    for (std::vector<char*>::iterator i = args_.begin(); i != args_.end();
+         ++i) {
+      free(*i);
+    }
+  }
+  void AddArgument(const char* argument) {
+    args_.insert(args_.end() - 1, posix::StrDup(argument));
+  }
+
+  template <typename Str>
+  void AddArguments(const ::std::vector<Str>& arguments) {
+    for (typename ::std::vector<Str>::const_iterator i = arguments.begin();
+         i != arguments.end();
+         ++i) {
+      args_.insert(args_.end() - 1, posix::StrDup(i->c_str()));
+    }
+  }
+  char* const* Argv() {
+    return &args_[0];
+  }
+
+ private:
+  std::vector<char*> args_;
+};
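+
+// A hypothetical usage sketch of Arguments (not taken from the code below):
+// the vector is kept NULL-terminated so Argv() can be handed directly to
+// execve():
+//
+//   Arguments args;
+//   args.AddArgument("--gtest_filter=Foo.Bar");
+//   // args.Argv() now points to { "--gtest_filter=Foo.Bar", NULL }.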
+
+// A struct that encompasses the arguments to the child process of a
+// threadsafe-style death test process.
+struct ExecDeathTestArgs {
+  char* const* argv;  // Command-line arguments for the child's call to exec
+  int close_fd;       // File descriptor to close; the read end of a pipe
+};
+
+#  if GTEST_OS_MAC
+inline char** GetEnviron() {
+  // When Google Test is built as a framework on MacOS X, the environ variable
+  // is unavailable. Apple's documentation (man environ) recommends using
+  // _NSGetEnviron() instead.
+  return *_NSGetEnviron();
+}
+#  else
+// Some POSIX platforms expect you to declare environ. extern "C" makes
+// it reside in the global namespace.
+extern "C" char** environ;
+inline char** GetEnviron() { return environ; }
+#  endif  // GTEST_OS_MAC
+
+#  if !GTEST_OS_QNX
+// The main function for a threadsafe-style death test child process.
+// This function is called in a clone()-ed process and thus must avoid
+// any potentially unsafe operations like malloc or libc functions.
+static int ExecDeathTestChildMain(void* child_arg) {
+  ExecDeathTestArgs* const args = static_cast<ExecDeathTestArgs*>(child_arg);
+  GTEST_DEATH_TEST_CHECK_SYSCALL_(close(args->close_fd));
+
+  // We need to execute the test program in the same environment where
+  // it was originally invoked.  Therefore we change to the original
+  // working directory first.
+  const char* const original_dir =
+      UnitTest::GetInstance()->original_working_dir();
+  // We can safely call chdir() as it's a direct system call.
+  if (chdir(original_dir) != 0) {
+    DeathTestAbort(std::string("chdir(\"") + original_dir + "\") failed: " +
+                   GetLastErrnoDescription());
+    return EXIT_FAILURE;
+  }
+
+  // We can safely call execve() as it's a direct system call.  We
+  // cannot use execvp() as it's a libc function and thus potentially
+  // unsafe.  Since execve() doesn't search the PATH, the user must
+  // invoke the test program via a valid path that contains at least
+  // one path separator.
+  execve(args->argv[0], args->argv, GetEnviron());
+  DeathTestAbort(std::string("execve(") + args->argv[0] + ", ...) in " +
+                 original_dir + " failed: " +
+                 GetLastErrnoDescription());
+  return EXIT_FAILURE;
+}
+#  endif  // !GTEST_OS_QNX
+
+// Two utility routines that together determine the direction the stack
+// grows.
+// This could be accomplished more elegantly by a single recursive
+// function, but we want to guard against the unlikely possibility of
+// a smart compiler optimizing the recursion away.
+//
+// GTEST_NO_INLINE_ is required to prevent GCC 4.6 from inlining
+// StackLowerThanAddress into StackGrowsDown, which then doesn't give
+// the correct answer.
+void StackLowerThanAddress(const void* ptr, bool* result) GTEST_NO_INLINE_;
+void StackLowerThanAddress(const void* ptr, bool* result) {
+  int dummy;
+  *result = (&dummy < ptr);
+}
+
+// Make sure AddressSanitizer does not tamper with the stack here.
+GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_
+bool StackGrowsDown() {
+  int dummy;
+  bool result;
+  StackLowerThanAddress(&dummy, &result);
+  return result;
+}
+
+// Spawns a child process with the same executable as the current process in
+// a thread-safe manner and instructs it to run the death test.  The
+// implementation uses fork(2) + exec.  On systems where clone(2) is
+// available, it is used instead, being slightly more thread-safe.  On QNX,
+// fork supports only single-threaded environments, so this function uses
+// spawn(2) there instead.  The function dies with an error message if
+// anything goes wrong.
+static pid_t ExecDeathTestSpawnChild(char* const* argv, int close_fd) {
+  ExecDeathTestArgs args = { argv, close_fd };
+  pid_t child_pid = -1;
+
+#  if GTEST_OS_QNX
+  // Obtains the current directory and sets it to be closed in the child
+  // process.
+  const int cwd_fd = open(".", O_RDONLY);
+  GTEST_DEATH_TEST_CHECK_(cwd_fd != -1);
+  GTEST_DEATH_TEST_CHECK_SYSCALL_(fcntl(cwd_fd, F_SETFD, FD_CLOEXEC));
+  // We need to execute the test program in the same environment where
+  // it was originally invoked.  Therefore we change to the original
+  // working directory first.
+  const char* const original_dir =
+      UnitTest::GetInstance()->original_working_dir();
+  // We can safely call chdir() as it's a direct system call.
+  if (chdir(original_dir) != 0) {
+    DeathTestAbort(std::string("chdir(\"") + original_dir + "\") failed: " +
+                   GetLastErrnoDescription());
+    return EXIT_FAILURE;
+  }
+
+  int fd_flags;
+  // Set close_fd to be closed after spawn.
+  GTEST_DEATH_TEST_CHECK_SYSCALL_(fd_flags = fcntl(close_fd, F_GETFD));
+  GTEST_DEATH_TEST_CHECK_SYSCALL_(fcntl(close_fd, F_SETFD,
+                                        fd_flags | FD_CLOEXEC));
+  struct inheritance inherit = {0};
+  // spawn is a system call.
+  child_pid = spawn(args.argv[0], 0, NULL, &inherit, args.argv, GetEnviron());
+  // Restores the current working directory.
+  GTEST_DEATH_TEST_CHECK_(fchdir(cwd_fd) != -1);
+  GTEST_DEATH_TEST_CHECK_SYSCALL_(close(cwd_fd));
+
+#  else   // GTEST_OS_QNX
+#   if GTEST_OS_LINUX
+  // When a SIGPROF signal is received while fork() or clone() are executing,
+  // the process may hang. To avoid this, we ignore SIGPROF here and re-enable
+  // it after the call to fork()/clone() is complete.
+  struct sigaction saved_sigprof_action;
+  struct sigaction ignore_sigprof_action;
+  memset(&ignore_sigprof_action, 0, sizeof(ignore_sigprof_action));
+  sigemptyset(&ignore_sigprof_action.sa_mask);
+  ignore_sigprof_action.sa_handler = SIG_IGN;
+  GTEST_DEATH_TEST_CHECK_SYSCALL_(sigaction(
+      SIGPROF, &ignore_sigprof_action, &saved_sigprof_action));
+#   endif  // GTEST_OS_LINUX
+
+#   if GTEST_HAS_CLONE
+  const bool use_fork = GTEST_FLAG(death_test_use_fork);
+
+  if (!use_fork) {
+    static const bool stack_grows_down = StackGrowsDown();
+    const size_t stack_size = getpagesize();
+    // MAP_ANONYMOUS is not defined on Mac, so we use MAP_ANON instead.
+    void* const stack = mmap(NULL, stack_size, PROT_READ | PROT_WRITE,
+                             MAP_ANON | MAP_PRIVATE, -1, 0);
+    GTEST_DEATH_TEST_CHECK_(stack != MAP_FAILED);
+
+    // Maximum stack alignment in bytes:  For a downward-growing stack, this
+    // amount is subtracted from size of the stack space to get an address
+    // that is within the stack space and is aligned on all systems we care
+    // about.  As far as I know there is no ABI with stack alignment greater
+    // than 64.  We assume stack and stack_size already have alignment of
+    // kMaxStackAlignment.
+    const size_t kMaxStackAlignment = 64;
+    void* const stack_top =
+        static_cast<char*>(stack) +
+            (stack_grows_down ? stack_size - kMaxStackAlignment : 0);
+    GTEST_DEATH_TEST_CHECK_(stack_size > kMaxStackAlignment &&
+        reinterpret_cast<intptr_t>(stack_top) % kMaxStackAlignment == 0);
+
+    child_pid = clone(&ExecDeathTestChildMain, stack_top, SIGCHLD, &args);
+
+    GTEST_DEATH_TEST_CHECK_(munmap(stack, stack_size) != -1);
+  }
+#   else
+  const bool use_fork = true;
+#   endif  // GTEST_HAS_CLONE
+
+  if (use_fork && (child_pid = fork()) == 0) {
+      ExecDeathTestChildMain(&args);
+      _exit(0);
+  }
+#  endif  // GTEST_OS_QNX
+#  if GTEST_OS_LINUX
+  GTEST_DEATH_TEST_CHECK_SYSCALL_(
+      sigaction(SIGPROF, &saved_sigprof_action, NULL));
+#  endif  // GTEST_OS_LINUX
+
+  GTEST_DEATH_TEST_CHECK_(child_pid != -1);
+  return child_pid;
+}
+
+// The AssumeRole process for a fork-and-exec death test.  It re-executes the
+// main program from the beginning, setting the --gtest_filter
+// and --gtest_internal_run_death_test flags to cause only the current
+// death test to be re-run.
+DeathTest::TestRole ExecDeathTest::AssumeRole() {
+  const UnitTestImpl* const impl = GetUnitTestImpl();
+  const InternalRunDeathTestFlag* const flag =
+      impl->internal_run_death_test_flag();
+  const TestInfo* const info = impl->current_test_info();
+  const int death_test_index = info->result()->death_test_count();
+
+  if (flag != NULL) {
+    set_write_fd(flag->write_fd());
+    return EXECUTE_TEST;
+  }
+
+  int pipe_fd[2];
+  GTEST_DEATH_TEST_CHECK_(pipe(pipe_fd) != -1);
+  // Clear the close-on-exec flag on the write end of the pipe, lest
+  // it be closed when the child process does an exec:
+  GTEST_DEATH_TEST_CHECK_(fcntl(pipe_fd[1], F_SETFD, 0) != -1);
+
+  const std::string filter_flag =
+      std::string("--") + GTEST_FLAG_PREFIX_ + kFilterFlag + "="
+      + info->test_case_name() + "." + info->name();
+  const std::string internal_flag =
+      std::string("--") + GTEST_FLAG_PREFIX_ + kInternalRunDeathTestFlag + "="
+      + file_ + "|" + StreamableToString(line_) + "|"
+      + StreamableToString(death_test_index) + "|"
+      + StreamableToString(pipe_fd[1]);
+  Arguments args;
+  args.AddArguments(GetArgvsForDeathTestChildProcess());
+  args.AddArgument(filter_flag.c_str());
+  args.AddArgument(internal_flag.c_str());
+
+  DeathTest::set_last_death_test_message("");
+
+  CaptureStderr();
+  // See the comment in NoExecDeathTest::AssumeRole for why the next line
+  // is necessary.
+  FlushInfoLog();
+
+  const pid_t child_pid = ExecDeathTestSpawnChild(args.Argv(), pipe_fd[0]);
+  GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[1]));
+  set_child_pid(child_pid);
+  set_read_fd(pipe_fd[0]);
+  set_spawned(true);
+  return OVERSEE_TEST;
+}
+
+# endif  // !GTEST_OS_WINDOWS
+
+// Creates a concrete DeathTest-derived class that depends on the
+// --gtest_death_test_style flag, and sets the pointer pointed to
+// by the "test" argument to its address.  If the test should be
+// skipped, sets that pointer to NULL.  Returns true, unless the
+// flag is set to an invalid value.
+bool DefaultDeathTestFactory::Create(const char* statement, const RE* regex,
+                                     const char* file, int line,
+                                     DeathTest** test) {
+  UnitTestImpl* const impl = GetUnitTestImpl();
+  const InternalRunDeathTestFlag* const flag =
+      impl->internal_run_death_test_flag();
+  const int death_test_index = impl->current_test_info()
+      ->increment_death_test_count();
+
+  if (flag != NULL) {
+    if (death_test_index > flag->index()) {
+      DeathTest::set_last_death_test_message(
+          "Death test count (" + StreamableToString(death_test_index)
+          + ") somehow exceeded expected maximum ("
+          + StreamableToString(flag->index()) + ")");
+      return false;
+    }
+
+    if (!(flag->file() == file && flag->line() == line &&
+          flag->index() == death_test_index)) {
+      *test = NULL;
+      return true;
+    }
+  }
+
+# if GTEST_OS_WINDOWS
+
+  if (GTEST_FLAG(death_test_style) == "threadsafe" ||
+      GTEST_FLAG(death_test_style) == "fast") {
+    *test = new WindowsDeathTest(statement, regex, file, line);
+  }
+
+# else
+
+  if (GTEST_FLAG(death_test_style) == "threadsafe") {
+    *test = new ExecDeathTest(statement, regex, file, line);
+  } else if (GTEST_FLAG(death_test_style) == "fast") {
+    *test = new NoExecDeathTest(statement, regex);
+  }
+
+# endif  // GTEST_OS_WINDOWS
+
+  else {  // NOLINT - this is more readable than unbalanced brackets inside #if.
+    DeathTest::set_last_death_test_message(
+        "Unknown death test style \"" + GTEST_FLAG(death_test_style)
+        + "\" encountered");
+    return false;
+  }
+
+  return true;
+}
+
+# if GTEST_OS_WINDOWS
+// Recreates the pipe and event handles from the provided parameters,
+// signals the event, and returns a file descriptor wrapped around the pipe
+// handle. This function is called in the child process only.
+int GetStatusFileDescriptor(unsigned int parent_process_id,
+                            size_t write_handle_as_size_t,
+                            size_t event_handle_as_size_t) {
+  AutoHandle parent_process_handle(::OpenProcess(PROCESS_DUP_HANDLE,
+                                                   FALSE,  // Non-inheritable.
+                                                   parent_process_id));
+  if (parent_process_handle.Get() == INVALID_HANDLE_VALUE) {
+    DeathTestAbort("Unable to open parent process " +
+                   StreamableToString(parent_process_id));
+  }
+
+  // TODO(vladl@google.com): Replace the following check with a
+  // compile-time assertion when available.
+  GTEST_CHECK_(sizeof(HANDLE) <= sizeof(size_t));
+
+  const HANDLE write_handle =
+      reinterpret_cast<HANDLE>(write_handle_as_size_t);
+  HANDLE dup_write_handle;
+
+  // The newly initialized handle is accessible only in the parent
+  // process. To obtain one accessible within the child, we need to use
+  // DuplicateHandle.
+  if (!::DuplicateHandle(parent_process_handle.Get(), write_handle,
+                         ::GetCurrentProcess(), &dup_write_handle,
+                         0x0,    // Requested privileges ignored since
+                                 // DUPLICATE_SAME_ACCESS is used.
+                         FALSE,  // Request a non-inheritable handle.
+                         DUPLICATE_SAME_ACCESS)) {
+    DeathTestAbort("Unable to duplicate the pipe handle " +
+                   StreamableToString(write_handle_as_size_t) +
+                   " from the parent process " +
+                   StreamableToString(parent_process_id));
+  }
+
+  const HANDLE event_handle = reinterpret_cast<HANDLE>(event_handle_as_size_t);
+  HANDLE dup_event_handle;
+
+  if (!::DuplicateHandle(parent_process_handle.Get(), event_handle,
+                         ::GetCurrentProcess(), &dup_event_handle,
+                         0x0,
+                         FALSE,
+                         DUPLICATE_SAME_ACCESS)) {
+    DeathTestAbort("Unable to duplicate the event handle " +
+                   StreamableToString(event_handle_as_size_t) +
+                   " from the parent process " +
+                   StreamableToString(parent_process_id));
+  }
+
+  const int write_fd =
+      ::_open_osfhandle(reinterpret_cast<intptr_t>(dup_write_handle), O_APPEND);
+  if (write_fd == -1) {
+    DeathTestAbort("Unable to convert pipe handle " +
+                   StreamableToString(write_handle_as_size_t) +
+                   " to a file descriptor");
+  }
+
+  // Signals the parent that the write end of the pipe has been acquired
+  // so the parent can release its own write end.
+  ::SetEvent(dup_event_handle);
+
+  return write_fd;
+}
+# endif  // GTEST_OS_WINDOWS
+
+// Returns a newly created InternalRunDeathTestFlag object with fields
+// initialized from the GTEST_FLAG(internal_run_death_test) flag if
+// the flag is specified; otherwise returns NULL.
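+// As parsed below, the flag value is '|'-separated: on POSIX systems it is
+// file|line|index|write_fd (e.g., illustratively, "foo_test.cc|42|1|5"); on
+// Windows it is file|line|index|parent_process_id|write_handle|event_handle.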
+InternalRunDeathTestFlag* ParseInternalRunDeathTestFlag() {
+  if (GTEST_FLAG(internal_run_death_test) == "") return NULL;
+
+  // GTEST_HAS_DEATH_TEST implies that we have ::std::string, so we
+  // can use it here.
+  int line = -1;
+  int index = -1;
+  ::std::vector< ::std::string> fields;
+  SplitString(GTEST_FLAG(internal_run_death_test).c_str(), '|', &fields);
+  int write_fd = -1;
+
+# if GTEST_OS_WINDOWS
+
+  unsigned int parent_process_id = 0;
+  size_t write_handle_as_size_t = 0;
+  size_t event_handle_as_size_t = 0;
+
+  if (fields.size() != 6
+      || !ParseNaturalNumber(fields[1], &line)
+      || !ParseNaturalNumber(fields[2], &index)
+      || !ParseNaturalNumber(fields[3], &parent_process_id)
+      || !ParseNaturalNumber(fields[4], &write_handle_as_size_t)
+      || !ParseNaturalNumber(fields[5], &event_handle_as_size_t)) {
+    DeathTestAbort("Bad --gtest_internal_run_death_test flag: " +
+                   GTEST_FLAG(internal_run_death_test));
+  }
+  write_fd = GetStatusFileDescriptor(parent_process_id,
+                                     write_handle_as_size_t,
+                                     event_handle_as_size_t);
+# else
+
+  if (fields.size() != 4
+      || !ParseNaturalNumber(fields[1], &line)
+      || !ParseNaturalNumber(fields[2], &index)
+      || !ParseNaturalNumber(fields[3], &write_fd)) {
+    DeathTestAbort("Bad --gtest_internal_run_death_test flag: "
+        + GTEST_FLAG(internal_run_death_test));
+  }
+
+# endif  // GTEST_OS_WINDOWS
+
+  return new InternalRunDeathTestFlag(fields[0], line, index, write_fd);
+}
+
+}  // namespace internal
+
+#endif  // GTEST_HAS_DEATH_TEST
+
+}  // namespace testing
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: keith.ray@gmail.com (Keith Ray)
+
+
+#include <stdlib.h>
+
+#if GTEST_OS_WINDOWS_MOBILE
+# include <windows.h>
+#elif GTEST_OS_WINDOWS
+# include <direct.h>
+# include <io.h>
+#elif GTEST_OS_SYMBIAN
+// Symbian OpenC has PATH_MAX in sys/syslimits.h
+# include <sys/syslimits.h>
+#else
+# include <limits.h>
+# include <climits>  // Some Linux distributions define PATH_MAX here.
+#endif  // GTEST_OS_WINDOWS_MOBILE
+
+#if GTEST_OS_WINDOWS
+# define GTEST_PATH_MAX_ _MAX_PATH
+#elif defined(PATH_MAX)
+# define GTEST_PATH_MAX_ PATH_MAX
+#elif defined(_XOPEN_PATH_MAX)
+# define GTEST_PATH_MAX_ _XOPEN_PATH_MAX
+#else
+# define GTEST_PATH_MAX_ _POSIX_PATH_MAX
+#endif  // GTEST_OS_WINDOWS
+
+
+namespace testing {
+namespace internal {
+
+#if GTEST_OS_WINDOWS
+// On Windows, '\\' is the standard path separator, but many tools and the
+// Windows API also accept '/' as an alternate path separator. Unless otherwise
+// noted, a file path can contain either kind of path separators, or a mixture
+// of them.
+const char kPathSeparator = '\\';
+const char kAlternatePathSeparator = '/';
+const char kAlternatePathSeparatorString[] = "/";
+# if GTEST_OS_WINDOWS_MOBILE
+// Windows CE doesn't have a current directory. You should not use
+// the current directory in tests on Windows CE, but this at least
+// provides a reasonable fallback.
+const char kCurrentDirectoryString[] = "\\";
+// Windows CE doesn't define INVALID_FILE_ATTRIBUTES
+const DWORD kInvalidFileAttributes = 0xffffffff;
+# else
+const char kCurrentDirectoryString[] = ".\\";
+# endif  // GTEST_OS_WINDOWS_MOBILE
+#else
+const char kPathSeparator = '/';
+const char kCurrentDirectoryString[] = "./";
+#endif  // GTEST_OS_WINDOWS
+
+// Returns whether the given character is a valid path separator.
+static bool IsPathSeparator(char c) {
+#if GTEST_HAS_ALT_PATH_SEP_
+  return (c == kPathSeparator) || (c == kAlternatePathSeparator);
+#else
+  return c == kPathSeparator;
+#endif
+}
+
+// Returns the current working directory, or "" if unsuccessful.
+FilePath FilePath::GetCurrentDir() {
+#if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_WINDOWS_PHONE || GTEST_OS_WINDOWS_RT
+  // Windows CE doesn't have a current directory, so we just return
+  // something reasonable.
+  return FilePath(kCurrentDirectoryString);
+#elif GTEST_OS_WINDOWS
+  char cwd[GTEST_PATH_MAX_ + 1] = { '\0' };
+  return FilePath(_getcwd(cwd, sizeof(cwd)) == NULL ? "" : cwd);
+#else
+  char cwd[GTEST_PATH_MAX_ + 1] = { '\0' };
+  char* result = getcwd(cwd, sizeof(cwd));
+# if GTEST_OS_NACL
+  // getcwd will likely fail in NaCl due to the sandbox, so return something
+  // reasonable. The user may have provided a shim implementation for getcwd,
+  // however, so fall back only when failure is detected.
+  return FilePath(result == NULL ? kCurrentDirectoryString : cwd);
+# endif  // GTEST_OS_NACL
+  return FilePath(result == NULL ? "" : cwd);
+#endif  // GTEST_OS_WINDOWS_MOBILE
+}
+
+// Returns a copy of the FilePath with the case-insensitive extension removed.
+// Example: FilePath("dir/file.exe").RemoveExtension("EXE") returns
+// FilePath("dir/file"). If a case-insensitive extension is not
+// found, returns a copy of the original FilePath.
+FilePath FilePath::RemoveExtension(const char* extension) const {
+  const std::string dot_extension = std::string(".") + extension;
+  if (String::EndsWithCaseInsensitive(pathname_, dot_extension)) {
+    return FilePath(pathname_.substr(
+        0, pathname_.length() - dot_extension.length()));
+  }
+  return *this;
+}
+
+// Returns a pointer to the last occurrence of a valid path separator in
+// the FilePath. On Windows, for example, both '/' and '\' are valid path
+// separators. Returns NULL if no path separator was found.
+const char* FilePath::FindLastPathSeparator() const {
+  const char* const last_sep = strrchr(c_str(), kPathSeparator);
+#if GTEST_HAS_ALT_PATH_SEP_
+  const char* const last_alt_sep = strrchr(c_str(), kAlternatePathSeparator);
+  // Comparing two pointers of which only one is NULL is undefined.
+  if (last_alt_sep != NULL &&
+      (last_sep == NULL || last_alt_sep > last_sep)) {
+    return last_alt_sep;
+  }
+#endif
+  return last_sep;
+}
+
+// Returns a copy of the FilePath with the directory part removed.
+// Example: FilePath("path/to/file").RemoveDirectoryName() returns
+// FilePath("file"). If there is no directory part ("just_a_file"), it returns
+// the FilePath unmodified. If there is no file part ("just_a_dir/") it
+// returns an empty FilePath ("").
+// On Windows platform, '\' is the path separator, otherwise it is '/'.
+FilePath FilePath::RemoveDirectoryName() const {
+  const char* const last_sep = FindLastPathSeparator();
+  return last_sep ? FilePath(last_sep + 1) : *this;
+}
+
+// RemoveFileName returns the directory path with the filename removed.
+// Example: FilePath("path/to/file").RemoveFileName() returns "path/to/".
+// If the FilePath is "a_file" or "/a_file", RemoveFileName returns
+// FilePath("./") or, on Windows, FilePath(".\\"). If the filepath does
+// not have a file, like "just/a/dir/", it returns the FilePath unmodified.
+// On Windows platform, '\' is the path separator, otherwise it is '/'.
+FilePath FilePath::RemoveFileName() const {
+  const char* const last_sep = FindLastPathSeparator();
+  std::string dir;
+  if (last_sep) {
+    dir = std::string(c_str(), last_sep + 1 - c_str());
+  } else {
+    dir = kCurrentDirectoryString;
+  }
+  return FilePath(dir);
+}
+
+// Helper functions for naming files in a directory for xml output.
+
+// Given directory = "dir", base_name = "test", number = 0,
+// extension = "xml", returns "dir/test.xml". If number is greater
+// than zero (e.g., 12), returns "dir/test_12.xml".
+// On Windows platform, uses \ as the separator rather than /.
+FilePath FilePath::MakeFileName(const FilePath& directory,
+                                const FilePath& base_name,
+                                int number,
+                                const char* extension) {
+  std::string file;
+  if (number == 0) {
+    file = base_name.string() + "." + extension;
+  } else {
+    file = base_name.string() + "_" + StreamableToString(number)
+        + "." + extension;
+  }
+  return ConcatPaths(directory, FilePath(file));
+}
+
+// Given directory = "dir", relative_path = "test.xml", returns "dir/test.xml".
+// On Windows, uses \ as the separator rather than /.
+FilePath FilePath::ConcatPaths(const FilePath& directory,
+                               const FilePath& relative_path) {
+  if (directory.IsEmpty())
+    return relative_path;
+  const FilePath dir(directory.RemoveTrailingPathSeparator());
+  return FilePath(dir.string() + kPathSeparator + relative_path.string());
+}
+
+// Returns true if pathname describes something findable in the file-system,
+// either a file, directory, or whatever.
+bool FilePath::FileOrDirectoryExists() const {
+#if GTEST_OS_WINDOWS_MOBILE
+  LPCWSTR unicode = String::AnsiToUtf16(pathname_.c_str());
+  const DWORD attributes = GetFileAttributes(unicode);
+  delete [] unicode;
+  return attributes != kInvalidFileAttributes;
+#else
+  posix::StatStruct file_stat;
+  return posix::Stat(pathname_.c_str(), &file_stat) == 0;
+#endif  // GTEST_OS_WINDOWS_MOBILE
+}
+
+// Returns true if pathname describes a directory in the file-system
+// that exists.
+bool FilePath::DirectoryExists() const {
+  bool result = false;
+#if GTEST_OS_WINDOWS
+  // Don't strip off trailing separator if path is a root directory on
+  // Windows (like "C:\\").
+  const FilePath& path(IsRootDirectory() ? *this :
+                                           RemoveTrailingPathSeparator());
+#else
+  const FilePath& path(*this);
+#endif
+
+#if GTEST_OS_WINDOWS_MOBILE
+  LPCWSTR unicode = String::AnsiToUtf16(path.c_str());
+  const DWORD attributes = GetFileAttributes(unicode);
+  delete [] unicode;
+  if ((attributes != kInvalidFileAttributes) &&
+      (attributes & FILE_ATTRIBUTE_DIRECTORY)) {
+    result = true;
+  }
+#else
+  posix::StatStruct file_stat;
+  result = posix::Stat(path.c_str(), &file_stat) == 0 &&
+      posix::IsDir(file_stat);
+#endif  // GTEST_OS_WINDOWS_MOBILE
+
+  return result;
+}
+
+// Returns true if pathname describes a root directory. (Windows has one
+// root directory per disk drive.)
+bool FilePath::IsRootDirectory() const {
+#if GTEST_OS_WINDOWS
+  // TODO(wan@google.com): on Windows a network share like
+  // \\server\share can be a root directory, although it cannot be the
+  // current directory.  Handle this properly.
+  return pathname_.length() == 3 && IsAbsolutePath();
+#else
+  return pathname_.length() == 1 && IsPathSeparator(pathname_.c_str()[0]);
+#endif
+}
+
+// Returns true if pathname describes an absolute path.
+bool FilePath::IsAbsolutePath() const {
+  const char* const name = pathname_.c_str();
+#if GTEST_OS_WINDOWS
+  return pathname_.length() >= 3 &&
+     ((name[0] >= 'a' && name[0] <= 'z') ||
+      (name[0] >= 'A' && name[0] <= 'Z')) &&
+     name[1] == ':' &&
+     IsPathSeparator(name[2]);
+#else
+  return IsPathSeparator(name[0]);
+#endif
+}
+
+// Returns a pathname for a file that does not currently exist. The pathname
+// will be directory/base_name.extension or
+// directory/base_name_<number>.extension if directory/base_name.extension
+// already exists. The number will be incremented until a pathname is found
+// that does not already exist.
+// Examples: 'dir/foo_test.xml' or 'dir/foo_test_1.xml'.
+// There could be a race condition if two or more processes are calling this
+// function at the same time -- they could both pick the same filename.
+FilePath FilePath::GenerateUniqueFileName(const FilePath& directory,
+                                          const FilePath& base_name,
+                                          const char* extension) {
+  FilePath full_pathname;
+  int number = 0;
+  do {
+    full_pathname.Set(MakeFileName(directory, base_name, number++, extension));
+  } while (full_pathname.FileOrDirectoryExists());
+  return full_pathname;
+}
+
+// Returns true if FilePath ends with a path separator, which indicates that
+// it is intended to represent a directory. Returns false otherwise.
+// This does NOT check that a directory (or file) actually exists.
+bool FilePath::IsDirectory() const {
+  return !pathname_.empty() &&
+         IsPathSeparator(pathname_.c_str()[pathname_.length() - 1]);
+}
+
+// Create directories so that path exists. Returns true if successful or if
+// the directories already exist; returns false if unable to create directories
+// for any reason.
+bool FilePath::CreateDirectoriesRecursively() const {
+  if (!this->IsDirectory()) {
+    return false;
+  }
+
+  if (pathname_.length() == 0 || this->DirectoryExists()) {
+    return true;
+  }
+
+  const FilePath parent(this->RemoveTrailingPathSeparator().RemoveFileName());
+  return parent.CreateDirectoriesRecursively() && this->CreateFolder();
+}
+
+// Create the directory so that path exists. Returns true if successful or
+// if the directory already exists; returns false if unable to create the
+// directory for any reason, including if the parent directory does not
+// exist. Not named "CreateDirectory" because that's a macro on Windows.
+bool FilePath::CreateFolder() const {
+#if GTEST_OS_WINDOWS_MOBILE
+  FilePath removed_sep(this->RemoveTrailingPathSeparator());
+  LPCWSTR unicode = String::AnsiToUtf16(removed_sep.c_str());
+  int result = CreateDirectory(unicode, NULL) ? 0 : -1;
+  delete [] unicode;
+#elif GTEST_OS_WINDOWS
+  int result = _mkdir(pathname_.c_str());
+#else
+  int result = mkdir(pathname_.c_str(), 0777);
+#endif  // GTEST_OS_WINDOWS_MOBILE
+
+  if (result == -1) {
+    return this->DirectoryExists();  // An error is OK if the directory exists.
+  }
+  return true;  // No error.
+}
+
+// If input name has a trailing separator character, remove it and return the
+// name, otherwise return the name string unmodified.
+// On Windows platform, uses \ as the separator, other platforms use /.
+FilePath FilePath::RemoveTrailingPathSeparator() const {
+  return IsDirectory()
+      ? FilePath(pathname_.substr(0, pathname_.length() - 1))
+      : *this;
+}
+
+// Removes any redundant separators that might be in the pathname.
+// For example, "bar///foo" becomes "bar/foo". Does not eliminate other
+// redundancies that might be in a pathname involving "." or "..".
+// TODO(wan@google.com): handle Windows network shares (e.g. \\server\share).
+void FilePath::Normalize() {
+  if (pathname_.c_str() == NULL) {
+    pathname_ = "";
+    return;
+  }
+  const char* src = pathname_.c_str();
+  char* const dest = new char[pathname_.length() + 1];
+  char* dest_ptr = dest;
+  memset(dest_ptr, 0, pathname_.length() + 1);
+
+  while (*src != '\0') {
+    *dest_ptr = *src;
+    if (!IsPathSeparator(*src)) {
+      src++;
+    } else {
+#if GTEST_HAS_ALT_PATH_SEP_
+      if (*dest_ptr == kAlternatePathSeparator) {
+        *dest_ptr = kPathSeparator;
+      }
+#endif
+      while (IsPathSeparator(*src))
+        src++;
+    }
+    dest_ptr++;
+  }
+  *dest_ptr = '\0';
+  pathname_ = dest;
+  delete[] dest;
+}
+
+}  // namespace internal
+}  // namespace testing
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+
+#include <limits.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <fstream>
+
+#if GTEST_OS_WINDOWS
+# include <windows.h>
+# include <io.h>
+# include <sys/stat.h>
+# include <map>  // Used in ThreadLocal.
+#else
+# include <unistd.h>
+#endif  // GTEST_OS_WINDOWS
+
+#if GTEST_OS_MAC
+# include <mach/mach_init.h>
+# include <mach/task.h>
+# include <mach/vm_map.h>
+#endif  // GTEST_OS_MAC
+
+#if GTEST_OS_QNX
+# include <devctl.h>
+# include <fcntl.h>
+# include <sys/procfs.h>
+#endif  // GTEST_OS_QNX
+
+#if GTEST_OS_AIX
+# include <procinfo.h>
+# include <sys/types.h>
+#endif  // GTEST_OS_AIX
+
+
+// Indicates that this translation unit is part of Google Test's
+// implementation.  It must come before gtest-internal-inl.h is
+// included, or there will be a compiler error.  This trick exists to
+// prevent the accidental inclusion of gtest-internal-inl.h in the
+// user's code.
+#define GTEST_IMPLEMENTATION_ 1
+#undef GTEST_IMPLEMENTATION_
+
+namespace testing {
+namespace internal {
+
+#if defined(_MSC_VER) || defined(__BORLANDC__)
+// MSVC and C++Builder do not provide a definition of STDERR_FILENO.
+const int kStdOutFileno = 1;
+const int kStdErrFileno = 2;
+#else
+const int kStdOutFileno = STDOUT_FILENO;
+const int kStdErrFileno = STDERR_FILENO;
+#endif  // _MSC_VER
+
+#if GTEST_OS_LINUX
+
+namespace {
+template <typename T>
+T ReadProcFileField(const string& filename, int field) {
+  std::string dummy;
+  std::ifstream file(filename.c_str());
+  while (field-- > 0) {
+    file >> dummy;
+  }
+  T output = 0;
+  file >> output;
+  return output;
+}
+}  // namespace
+
+// Returns the number of active threads, or 0 when there is an error.
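+// num_threads is the 20th whitespace-separated field of /proc/<pid>/stat
+// (see proc(5)), so ReadProcFileField skips the 19 fields before it.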
+size_t GetThreadCount() {
+  const string filename =
+      (Message() << "/proc/" << getpid() << "/stat").GetString();
+  return ReadProcFileField<int>(filename, 19);
+}
+
+#elif GTEST_OS_MAC
+
+size_t GetThreadCount() {
+  const task_t task = mach_task_self();
+  mach_msg_type_number_t thread_count;
+  thread_act_array_t thread_list;
+  const kern_return_t status = task_threads(task, &thread_list, &thread_count);
+  if (status == KERN_SUCCESS) {
+    // task_threads allocates resources in thread_list and we need to free them
+    // to avoid leaks.
+    vm_deallocate(task,
+                  reinterpret_cast<vm_address_t>(thread_list),
+                  sizeof(thread_t) * thread_count);
+    return static_cast<size_t>(thread_count);
+  } else {
+    return 0;
+  }
+}
+
+#elif GTEST_OS_QNX
+
+// Returns the number of threads running in the process, or 0 to indicate that
+// we cannot detect it.
+size_t GetThreadCount() {
+  const int fd = open("/proc/self/as", O_RDONLY);
+  if (fd < 0) {
+    return 0;
+  }
+  procfs_info process_info;
+  const int status =
+      devctl(fd, DCMD_PROC_INFO, &process_info, sizeof(process_info), NULL);
+  close(fd);
+  if (status == EOK) {
+    return static_cast<size_t>(process_info.num_threads);
+  } else {
+    return 0;
+  }
+}
+
+#elif GTEST_OS_AIX
+
+size_t GetThreadCount() {
+  struct procentry64 entry;
+  pid_t pid = getpid();
+  int status = getprocs64(&entry, sizeof(entry), NULL, 0, &pid, 1);
+  if (status == 1) {
+    return entry.pi_thcount;
+  } else {
+    return 0;
+  }
+}
+
+#else
+
+size_t GetThreadCount() {
+  // There's no portable way to detect the number of threads, so we just
+  // return 0 to indicate that we cannot detect it.
+  return 0;
+}
+
+#endif  // GTEST_OS_LINUX
+
+#if GTEST_IS_THREADSAFE && GTEST_OS_WINDOWS
+
+void SleepMilliseconds(int n) {
+  ::Sleep(n);
+}
+
+AutoHandle::AutoHandle()
+    : handle_(INVALID_HANDLE_VALUE) {}
+
+AutoHandle::AutoHandle(Handle handle)
+    : handle_(handle) {}
+
+AutoHandle::~AutoHandle() {
+  Reset();
+}
+
+AutoHandle::Handle AutoHandle::Get() const {
+  return handle_;
+}
+
+void AutoHandle::Reset() {
+  Reset(INVALID_HANDLE_VALUE);
+}
+
+void AutoHandle::Reset(HANDLE handle) {
+  // Resetting with the same handle we already own is invalid.
+  if (handle_ != handle) {
+    if (IsCloseable()) {
+      ::CloseHandle(handle_);
+    }
+    handle_ = handle;
+  } else {
+    GTEST_CHECK_(!IsCloseable())
+        << "Resetting a valid handle to itself is likely a programmer error "
+            "and thus not allowed.";
+  }
+}
+
+bool AutoHandle::IsCloseable() const {
+  // Different Windows APIs may use either of these values to represent an
+  // invalid handle.
+  return handle_ != NULL && handle_ != INVALID_HANDLE_VALUE;
+}
+
+Notification::Notification()
+    : event_(::CreateEvent(NULL,   // Default security attributes.
+                           TRUE,   // Do not reset automatically.
+                           FALSE,  // Initially unset.
+                           NULL)) {  // Anonymous event.
+  GTEST_CHECK_(event_.Get() != NULL);
+}
+
+void Notification::Notify() {
+  GTEST_CHECK_(::SetEvent(event_.Get()) != FALSE);
+}
+
+void Notification::WaitForNotification() {
+  GTEST_CHECK_(
+      ::WaitForSingleObject(event_.Get(), INFINITE) == WAIT_OBJECT_0);
+}
+
+Mutex::Mutex()
+    : owner_thread_id_(0),
+      type_(kDynamic),
+      critical_section_init_phase_(0),
+      critical_section_(new CRITICAL_SECTION) {
+  ::InitializeCriticalSection(critical_section_);
+}
+
+Mutex::~Mutex() {
+  // Static mutexes are leaked intentionally. It is not thread-safe to try
+  // to clean them up.
+  // TODO(yukawa): Switch to Slim Reader/Writer (SRW) Locks, which requires
+  // nothing to clean it up but is available only on Vista and later.
+  // http://msdn.microsoft.com/en-us/library/windows/desktop/aa904937.aspx
+  if (type_ == kDynamic) {
+    ::DeleteCriticalSection(critical_section_);
+    delete critical_section_;
+    critical_section_ = NULL;
+  }
+}
+
+void Mutex::Lock() {
+  ThreadSafeLazyInit();
+  ::EnterCriticalSection(critical_section_);
+  owner_thread_id_ = ::GetCurrentThreadId();
+}
+
+void Mutex::Unlock() {
+  ThreadSafeLazyInit();
+  // We don't protect writing to owner_thread_id_ here, as it's the
+  // caller's responsibility to ensure that the current thread holds the
+  // mutex when this is called.
+  owner_thread_id_ = 0;
+  ::LeaveCriticalSection(critical_section_);
+}
+
+// Does nothing if the current thread holds the mutex. Otherwise, crashes
+// with high probability.
+void Mutex::AssertHeld() {
+  ThreadSafeLazyInit();
+  GTEST_CHECK_(owner_thread_id_ == ::GetCurrentThreadId())
+      << "The current thread is not holding the mutex @" << this;
+}
+
+// Initializes owner_thread_id_ and critical_section_ in static mutexes.
+void Mutex::ThreadSafeLazyInit() {
+  // Dynamic mutexes are initialized in the constructor.
+  if (type_ == kStatic) {
+    switch (
+        ::InterlockedCompareExchange(&critical_section_init_phase_, 1L, 0L)) {
+      case 0:
+        // If critical_section_init_phase_ was 0 before the exchange, we
+        // are the first to test it and need to perform the initialization.
+        owner_thread_id_ = 0;
+        critical_section_ = new CRITICAL_SECTION;
+        ::InitializeCriticalSection(critical_section_);
+        // Updates the critical_section_init_phase_ to 2 to signal
+        // initialization complete.
+        GTEST_CHECK_(::InterlockedCompareExchange(
+                          &critical_section_init_phase_, 2L, 1L) ==
+                      1L);
+        break;
+      case 1:
+        // Somebody else is already initializing the mutex; spin until they
+        // are done.
+        while (::InterlockedCompareExchange(&critical_section_init_phase_,
+                                            2L,
+                                            2L) != 2L) {
+          // Possibly yields the rest of the thread's time slice to other
+          // threads.
+          ::Sleep(0);
+        }
+        break;
+
+      case 2:
+        break;  // The mutex is already initialized and ready for use.
+
+      default:
+        GTEST_CHECK_(false)
+            << "Unexpected value of critical_section_init_phase_ "
+            << "while initializing a static mutex.";
+    }
+  }
+}
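+
+// Illustrative sketch (the names g_log_mutex and Log are hypothetical): a
+// mutex constructed with the Mutex::kStaticMutex tag, like the registry
+// mutexes defined later in this file, defers its CRITICAL_SECTION setup to
+// ThreadSafeLazyInit():
+//
+//   static Mutex g_log_mutex(Mutex::kStaticMutex);
+//   void Log(const char* msg) {
+//     MutexLock lock(&g_log_mutex);  // Lock() calls ThreadSafeLazyInit().
+//     fputs(msg, stderr);
+//   }
+//
+// The first thread to call Lock() wins the 0 -> 1 compare-exchange and
+// creates the CRITICAL_SECTION; concurrent callers spin until the init
+// phase reaches 2.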
+
+namespace {
+
+class ThreadWithParamSupport : public ThreadWithParamBase {
+ public:
+  static HANDLE CreateThread(Runnable* runnable,
+                             Notification* thread_can_start) {
+    ThreadMainParam* param = new ThreadMainParam(runnable, thread_can_start);
+    DWORD thread_id;
+    // TODO(yukawa): Consider using _beginthreadex instead.
+    HANDLE thread_handle = ::CreateThread(
+        NULL,    // Default security.
+        0,       // Default stack size.
+        &ThreadWithParamSupport::ThreadMain,
+        param,   // Parameter to ThreadMain.
+        0x0,     // Default creation flags.
+        &thread_id);  // Need a valid pointer for the call to work under Win98.
+    GTEST_CHECK_(thread_handle != NULL) << "CreateThread failed with error "
+                                        << ::GetLastError() << ".";
+    if (thread_handle == NULL) {
+      delete param;
+    }
+    return thread_handle;
+  }
+
+ private:
+  struct ThreadMainParam {
+    ThreadMainParam(Runnable* runnable, Notification* thread_can_start)
+        : runnable_(runnable),
+          thread_can_start_(thread_can_start) {
+    }
+    scoped_ptr<Runnable> runnable_;
+    // Does not own.
+    Notification* thread_can_start_;
+  };
+
+  static DWORD WINAPI ThreadMain(void* ptr) {
+    // Transfers ownership.
+    scoped_ptr<ThreadMainParam> param(static_cast<ThreadMainParam*>(ptr));
+    if (param->thread_can_start_ != NULL)
+      param->thread_can_start_->WaitForNotification();
+    param->runnable_->Run();
+    return 0;
+  }
+
+  // Prohibit instantiation.
+  ThreadWithParamSupport();
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadWithParamSupport);
+};
+
+}  // namespace
+
+ThreadWithParamBase::ThreadWithParamBase(Runnable *runnable,
+                                         Notification* thread_can_start)
+      : thread_(ThreadWithParamSupport::CreateThread(runnable,
+                                                     thread_can_start)) {
+}
+
+ThreadWithParamBase::~ThreadWithParamBase() {
+  Join();
+}
+
+void ThreadWithParamBase::Join() {
+  GTEST_CHECK_(::WaitForSingleObject(thread_.Get(), INFINITE) == WAIT_OBJECT_0)
+      << "Failed to join the thread with error " << ::GetLastError() << ".";
+}
+
+// Maps a thread to a set of ThreadIdToThreadLocals that have values
+// instantiated on that thread and notifies them when the thread exits.  A
+// ThreadLocal instance is expected to persist until all threads it has
+// values on have terminated.
+class ThreadLocalRegistryImpl {
+ public:
+  // Registers thread_local_instance as having a value on the current thread.
+  // Returns the holder of that value, creating it on the first access from
+  // this thread.
+  static ThreadLocalValueHolderBase* GetValueOnCurrentThread(
+      const ThreadLocalBase* thread_local_instance) {
+    DWORD current_thread = ::GetCurrentThreadId();
+    MutexLock lock(&mutex_);
+    ThreadIdToThreadLocals* const thread_to_thread_locals =
+        GetThreadLocalsMapLocked();
+    ThreadIdToThreadLocals::iterator thread_local_pos =
+        thread_to_thread_locals->find(current_thread);
+    if (thread_local_pos == thread_to_thread_locals->end()) {
+      thread_local_pos = thread_to_thread_locals->insert(
+          std::make_pair(current_thread, ThreadLocalValues())).first;
+      StartWatcherThreadFor(current_thread);
+    }
+    ThreadLocalValues& thread_local_values = thread_local_pos->second;
+    ThreadLocalValues::iterator value_pos =
+        thread_local_values.find(thread_local_instance);
+    if (value_pos == thread_local_values.end()) {
+      value_pos =
+          thread_local_values
+              .insert(std::make_pair(
+                  thread_local_instance,
+                  linked_ptr<ThreadLocalValueHolderBase>(
+                      thread_local_instance->NewValueForCurrentThread())))
+              .first;
+    }
+    return value_pos->second.get();
+  }
+
+  static void OnThreadLocalDestroyed(
+      const ThreadLocalBase* thread_local_instance) {
+    std::vector<linked_ptr<ThreadLocalValueHolderBase> > value_holders;
+    // Clean up the ThreadLocalValues data structure while holding the lock, but
+    // defer the destruction of the ThreadLocalValueHolderBases.
+    {
+      MutexLock lock(&mutex_);
+      ThreadIdToThreadLocals* const thread_to_thread_locals =
+          GetThreadLocalsMapLocked();
+      for (ThreadIdToThreadLocals::iterator it =
+          thread_to_thread_locals->begin();
+          it != thread_to_thread_locals->end();
+          ++it) {
+        ThreadLocalValues& thread_local_values = it->second;
+        ThreadLocalValues::iterator value_pos =
+            thread_local_values.find(thread_local_instance);
+        if (value_pos != thread_local_values.end()) {
+          value_holders.push_back(value_pos->second);
+          thread_local_values.erase(value_pos);
+          // This 'if' can only be successful at most once, so theoretically we
+          // could break out of the loop here, but we don't bother doing so.
+        }
+      }
+    }
+    // Outside the lock, let the destructor for 'value_holders' deallocate the
+    // ThreadLocalValueHolderBases.
+  }
+
+  static void OnThreadExit(DWORD thread_id) {
+    GTEST_CHECK_(thread_id != 0) << ::GetLastError();
+    std::vector<linked_ptr<ThreadLocalValueHolderBase> > value_holders;
+    // Clean up the ThreadIdToThreadLocals data structure while holding the
+    // lock, but defer the destruction of the ThreadLocalValueHolderBases.
+    {
+      MutexLock lock(&mutex_);
+      ThreadIdToThreadLocals* const thread_to_thread_locals =
+          GetThreadLocalsMapLocked();
+      ThreadIdToThreadLocals::iterator thread_local_pos =
+          thread_to_thread_locals->find(thread_id);
+      if (thread_local_pos != thread_to_thread_locals->end()) {
+        ThreadLocalValues& thread_local_values = thread_local_pos->second;
+        for (ThreadLocalValues::iterator value_pos =
+            thread_local_values.begin();
+            value_pos != thread_local_values.end();
+            ++value_pos) {
+          value_holders.push_back(value_pos->second);
+        }
+        thread_to_thread_locals->erase(thread_local_pos);
+      }
+    }
+    // Outside the lock, let the destructor for 'value_holders' deallocate the
+    // ThreadLocalValueHolderBases.
+  }
+
+ private:
+  // In a particular thread, maps a ThreadLocal object to its value.
+  typedef std::map<const ThreadLocalBase*,
+                   linked_ptr<ThreadLocalValueHolderBase> > ThreadLocalValues;
+  // Maps a thread's ID to the ThreadLocalValues instantiated on that
+  // thread.
+  typedef std::map<DWORD, ThreadLocalValues> ThreadIdToThreadLocals;
+
+  // Holds the thread id and thread handle that we pass from
+  // StartWatcherThreadFor to WatcherThreadFunc.
+  typedef std::pair<DWORD, HANDLE> ThreadIdAndHandle;
+
+  static void StartWatcherThreadFor(DWORD thread_id) {
+    // The returned handle will be kept in thread_map and closed by
+    // watcher_thread in WatcherThreadFunc.
+    HANDLE thread = ::OpenThread(SYNCHRONIZE | THREAD_QUERY_INFORMATION,
+                                 FALSE,
+                                 thread_id);
+    GTEST_CHECK_(thread != NULL);
+    // We need to pass a valid thread ID pointer into CreateThread for it
+    // to work correctly under Win98.
+    DWORD watcher_thread_id;
+    HANDLE watcher_thread = ::CreateThread(
+        NULL,   // Default security.
+        0,      // Default stack size
+        &ThreadLocalRegistryImpl::WatcherThreadFunc,
+        reinterpret_cast<LPVOID>(new ThreadIdAndHandle(thread_id, thread)),
+        CREATE_SUSPENDED,
+        &watcher_thread_id);
+    GTEST_CHECK_(watcher_thread != NULL);
+    // Give the watcher thread the same priority as ours to avoid being
+    // blocked by it.
+    ::SetThreadPriority(watcher_thread,
+                        ::GetThreadPriority(::GetCurrentThread()));
+    ::ResumeThread(watcher_thread);
+    ::CloseHandle(watcher_thread);
+  }
+
+  // Monitors exit from a given thread and, when that thread terminates,
+  // notifies the registry via OnThreadExit() so its ThreadLocal values can
+  // be destroyed.
+  static DWORD WINAPI WatcherThreadFunc(LPVOID param) {
+    const ThreadIdAndHandle* tah =
+        reinterpret_cast<const ThreadIdAndHandle*>(param);
+    GTEST_CHECK_(
+        ::WaitForSingleObject(tah->second, INFINITE) == WAIT_OBJECT_0);
+    OnThreadExit(tah->first);
+    ::CloseHandle(tah->second);
+    delete tah;
+    return 0;
+  }
+
+  // Returns map of thread local instances.
+  static ThreadIdToThreadLocals* GetThreadLocalsMapLocked() {
+    mutex_.AssertHeld();
+    static ThreadIdToThreadLocals* map = new ThreadIdToThreadLocals;
+    return map;
+  }
+
+  // Protects access to GetThreadLocalsMapLocked() and its return value.
+  static Mutex mutex_;
+  // Protects access to GetThreadMapLocked() and its return value.
+  static Mutex thread_map_mutex_;
+};
+
+Mutex ThreadLocalRegistryImpl::mutex_(Mutex::kStaticMutex);
+Mutex ThreadLocalRegistryImpl::thread_map_mutex_(Mutex::kStaticMutex);
+
+ThreadLocalValueHolderBase* ThreadLocalRegistry::GetValueOnCurrentThread(
+      const ThreadLocalBase* thread_local_instance) {
+  return ThreadLocalRegistryImpl::GetValueOnCurrentThread(
+      thread_local_instance);
+}
+
+void ThreadLocalRegistry::OnThreadLocalDestroyed(
+      const ThreadLocalBase* thread_local_instance) {
+  ThreadLocalRegistryImpl::OnThreadLocalDestroyed(thread_local_instance);
+}
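+
+// Illustrative sketch of how the registry above is exercised (assuming the
+// ThreadLocal<T> template declared in gtest-port.h; g_current_phase and
+// Worker are hypothetical names):
+//
+//   static ThreadLocal<std::string> g_current_phase;
+//   void Worker() {
+//     g_current_phase.set("setup");  // The first access on this thread goes
+//                                    // through GetValueOnCurrentThread(),
+//                                    // which also starts a watcher thread.
+//   }  // When the thread exits, the watcher calls OnThreadExit() and the
+//      // thread's value holder is destroyed.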
+
+#endif  // GTEST_IS_THREADSAFE && GTEST_OS_WINDOWS
+
+#if GTEST_USES_POSIX_RE
+
+// Implements RE.  Currently only needed for death tests.
+
+RE::~RE() {
+  if (is_valid_) {
+    // regfree'ing an invalid regex might crash because the content
+    // of the regex is undefined. Since the two regexes are compiled
+    // from essentially the same pattern, one cannot be valid (or
+    // invalid) without the other being so too.
+    regfree(&partial_regex_);
+    regfree(&full_regex_);
+  }
+  free(const_cast<char*>(pattern_));
+}
+
+// Returns true iff regular expression re matches the entire str.
+bool RE::FullMatch(const char* str, const RE& re) {
+  if (!re.is_valid_) return false;
+
+  regmatch_t match;
+  return regexec(&re.full_regex_, str, 1, &match, 0) == 0;
+}
+
+// Returns true iff regular expression re matches a substring of str
+// (including str itself).
+bool RE::PartialMatch(const char* str, const RE& re) {
+  if (!re.is_valid_) return false;
+
+  regmatch_t match;
+  return regexec(&re.partial_regex_, str, 1, &match, 0) == 0;
+}
+
+// Initializes an RE from its string representation.
+void RE::Init(const char* regex) {
+  pattern_ = posix::StrDup(regex);
+
+  // Reserves enough bytes to hold the regular expression used for a
+  // full match.
+  const size_t full_regex_len = strlen(regex) + 10;
+  char* const full_pattern = new char[full_regex_len];
+
+  snprintf(full_pattern, full_regex_len, "^(%s)$", regex);
+  is_valid_ = regcomp(&full_regex_, full_pattern, REG_EXTENDED) == 0;
+  // We want to call regcomp(&partial_regex_, ...) even if the
+  // previous expression returns false.  Otherwise partial_regex_ may
+  // not be properly initialized and may cause trouble when it's
+  // freed.
+  //
+  // Some implementations of POSIX regex (e.g. on at least some
+  // versions of Cygwin) don't accept the empty string as a valid
+  // regex.  We change it to an equivalent form "()" to be safe.
+  if (is_valid_) {
+    const char* const partial_regex = (*regex == '\0') ? "()" : regex;
+    is_valid_ = regcomp(&partial_regex_, partial_regex, REG_EXTENDED) == 0;
+  }
+  EXPECT_TRUE(is_valid_)
+      << "Regular expression \"" << regex
+      << "\" is not a valid POSIX Extended regular expression.";
+
+  delete[] full_pattern;
+}
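+
+// Example of the full/partial split above (a sketch using the RE API defined
+// in this section): for the pattern "a.c", full_regex_ is compiled from
+// "^(a.c)$" and partial_regex_ from "a.c", so
+//
+//   RE re("a.c");
+//   RE::FullMatch("abc", re);       // true  - the whole string matches.
+//   RE::FullMatch("xabcx", re);     // false - anchored at both ends.
+//   RE::PartialMatch("xabcx", re);  // true  - a matching substring suffices.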
+
+#elif GTEST_USES_SIMPLE_RE
+
+// Returns true iff ch appears anywhere in str (excluding the
+// terminating '\0' character).
+bool IsInSet(char ch, const char* str) {
+  return ch != '\0' && strchr(str, ch) != NULL;
+}
+
+// Returns true iff ch belongs to the given classification.  Unlike
+// similar functions in <ctype.h>, these aren't affected by the
+// current locale.
+bool IsAsciiDigit(char ch) { return '0' <= ch && ch <= '9'; }
+bool IsAsciiPunct(char ch) {
+  return IsInSet(ch, "^-!\"#$%&'()*+,./:;<=>?@[\\]_`{|}~");
+}
+bool IsRepeat(char ch) { return IsInSet(ch, "?*+"); }
+bool IsAsciiWhiteSpace(char ch) { return IsInSet(ch, " \f\n\r\t\v"); }
+bool IsAsciiWordChar(char ch) {
+  return ('a' <= ch && ch <= 'z') || ('A' <= ch && ch <= 'Z') ||
+      ('0' <= ch && ch <= '9') || ch == '_';
+}
+
+// Returns true iff "\\c" is a supported escape sequence.
+bool IsValidEscape(char c) {
+  return (IsAsciiPunct(c) || IsInSet(c, "dDfnrsStvwW"));
+}
+
+// Returns true iff the given atom (specified by escaped and pattern)
+// matches ch.  The result is undefined if the atom is invalid.
+bool AtomMatchesChar(bool escaped, char pattern_char, char ch) {
+  if (escaped) {  // "\\p" where p is pattern_char.
+    switch (pattern_char) {
+      case 'd': return IsAsciiDigit(ch);
+      case 'D': return !IsAsciiDigit(ch);
+      case 'f': return ch == '\f';
+      case 'n': return ch == '\n';
+      case 'r': return ch == '\r';
+      case 's': return IsAsciiWhiteSpace(ch);
+      case 'S': return !IsAsciiWhiteSpace(ch);
+      case 't': return ch == '\t';
+      case 'v': return ch == '\v';
+      case 'w': return IsAsciiWordChar(ch);
+      case 'W': return !IsAsciiWordChar(ch);
+    }
+    return IsAsciiPunct(pattern_char) && pattern_char == ch;
+  }
+
+  return (pattern_char == '.' && ch != '\n') || pattern_char == ch;
+}
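+
+// Examples of the atom semantics above (sketch):
+//   AtomMatchesChar(true,  'd', '7');   // true  - "\d" matches a digit.
+//   AtomMatchesChar(true,  'D', '7');   // false - "\D" wants a non-digit.
+//   AtomMatchesChar(false, '.', '\n');  // false - '.' never matches newline.
+//   AtomMatchesChar(false, 'x', 'x');   // true  - a literal matches itself.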
+
+// Helper function used by ValidateRegex() to format error messages.
+std::string FormatRegexSyntaxError(const char* regex, int index) {
+  return (Message() << "Syntax error at index " << index
+          << " in simple regular expression \"" << regex << "\": ").GetString();
+}
+
+// Generates non-fatal failures and returns false if regex is invalid;
+// otherwise returns true.
+bool ValidateRegex(const char* regex) {
+  if (regex == NULL) {
+    // TODO(wan@google.com): fix the source file location in the
+    // assertion failures to match where the regex is used in user
+    // code.
+    ADD_FAILURE() << "NULL is not a valid simple regular expression.";
+    return false;
+  }
+
+  bool is_valid = true;
+
+  // True iff ?, *, or + can follow the previous atom.
+  bool prev_repeatable = false;
+  for (int i = 0; regex[i]; i++) {
+    if (regex[i] == '\\') {  // An escape sequence
+      i++;
+      if (regex[i] == '\0') {
+        ADD_FAILURE() << FormatRegexSyntaxError(regex, i - 1)
+                      << "'\\' cannot appear at the end.";
+        return false;
+      }
+
+      if (!IsValidEscape(regex[i])) {
+        ADD_FAILURE() << FormatRegexSyntaxError(regex, i - 1)
+                      << "invalid escape sequence \"\\" << regex[i] << "\".";
+        is_valid = false;
+      }
+      prev_repeatable = true;
+    } else {  // Not an escape sequence.
+      const char ch = regex[i];
+
+      if (ch == '^' && i > 0) {
+        ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
+                      << "'^' can only appear at the beginning.";
+        is_valid = false;
+      } else if (ch == '$' && regex[i + 1] != '\0') {
+        ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
+                      << "'$' can only appear at the end.";
+        is_valid = false;
+      } else if (IsInSet(ch, "()[]{}|")) {
+        ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
+                      << "'" << ch << "' is unsupported.";
+        is_valid = false;
+      } else if (IsRepeat(ch) && !prev_repeatable) {
+        ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
+                      << "'" << ch << "' can only follow a repeatable token.";
+        is_valid = false;
+      }
+
+      prev_repeatable = !IsInSet(ch, "^$?*+");
+    }
+  }
+
+  return is_valid;
+}
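+
+// Examples of what the validator above accepts and rejects (sketch; the
+// simple dialect supports only '.', escapes such as "\d", and the
+// meta characters '^', '$', '?', '*' and '+'):
+//   ValidateRegex("^\\d+$");  // true  - anchors plus a repeated escape.
+//   ValidateRegex("a?b");     // true  - '?' follows a repeatable atom.
+//   ValidateRegex("*a");      // false - a repeat with nothing to repeat.
+//   ValidateRegex("(ab)+");   // false - grouping is unsupported.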
+
+// Matches a repeated regex atom followed by a valid simple regular
+// expression.  The regex atom is defined as c if escaped is false,
+// or \c otherwise.  repeat is the repetition meta character (?, *,
+// or +).  The behavior is undefined if str contains too many
+// characters to be indexable by size_t, in which case the test will
+// probably time out anyway.  We are fine with this limitation as
+// std::string has it too.
+bool MatchRepetitionAndRegexAtHead(
+    bool escaped, char c, char repeat, const char* regex,
+    const char* str) {
+  const size_t min_count = (repeat == '+') ? 1 : 0;
+  const size_t max_count = (repeat == '?') ? 1 :
+      static_cast<size_t>(-1) - 1;
+  // We cannot call numeric_limits::max() as it conflicts with the
+  // max() macro on Windows.
+
+  for (size_t i = 0; i <= max_count; ++i) {
+    // We know that the atom matches each of the first i characters in str.
+    if (i >= min_count && MatchRegexAtHead(regex, str + i)) {
+      // We have enough matches at the head, and the tail matches too.
+      // Since we only care about *whether* the pattern matches str
+      // (as opposed to *how* it matches), there is no need to find a
+      // greedy match.
+      return true;
+    }
+    if (str[i] == '\0' || !AtomMatchesChar(escaped, c, str[i]))
+      return false;
+  }
+  return false;
+}
+
+// Returns true iff regex matches a prefix of str.  regex must be a
+// valid simple regular expression and not start with "^", or the
+// result is undefined.
+bool MatchRegexAtHead(const char* regex, const char* str) {
+  if (*regex == '\0')  // An empty regex matches a prefix of anything.
+    return true;
+
+  // "$" only matches the end of a string.  Note that regex being
+  // valid guarantees that there's nothing after "$" in it.
+  if (*regex == '$')
+    return *str == '\0';
+
+  // Is the first thing in regex an escape sequence?
+  const bool escaped = *regex == '\\';
+  if (escaped)
+    ++regex;
+  if (IsRepeat(regex[1])) {
+    // MatchRepetitionAndRegexAtHead() calls MatchRegexAtHead(), so
+    // here's an indirect recursion.  It terminates as the regex gets
+    // shorter in each recursion.
+    return MatchRepetitionAndRegexAtHead(
+        escaped, regex[0], regex[1], regex + 2, str);
+  } else {
+    // regex isn't empty, isn't "$", and doesn't start with a
+    // repetition.  We match the first atom of regex with the first
+    // character of str and recurse.
+    return (*str != '\0') && AtomMatchesChar(escaped, *regex, *str) &&
+        MatchRegexAtHead(regex + 1, str + 1);
+  }
+}
+
+// Returns true iff regex matches any substring of str.  regex must be
+// a valid simple regular expression, or the result is undefined.
+//
+// The algorithm is recursive, but the recursion depth doesn't exceed
+// the regex length, so we won't need to worry about running out of
+// stack space normally.  In rare cases the time complexity can be
+// exponential with respect to the regex length + the string length,
+// but usually it's much faster (often close to linear).
+bool MatchRegexAnywhere(const char* regex, const char* str) {
+  if (regex == NULL || str == NULL)
+    return false;
+
+  if (*regex == '^')
+    return MatchRegexAtHead(regex + 1, str);
+
+  // A successful match can be anywhere in str.
+  do {
+    if (MatchRegexAtHead(regex, str))
+      return true;
+  } while (*str++ != '\0');
+  return false;
+}
+
+// Implements the RE class.
+
+RE::~RE() {
+  free(const_cast<char*>(pattern_));
+  free(const_cast<char*>(full_pattern_));
+}
+
+// Returns true iff regular expression re matches the entire str.
+bool RE::FullMatch(const char* str, const RE& re) {
+  return re.is_valid_ && MatchRegexAnywhere(re.full_pattern_, str);
+}
+
+// Returns true iff regular expression re matches a substring of str
+// (including str itself).
+bool RE::PartialMatch(const char* str, const RE& re) {
+  return re.is_valid_ && MatchRegexAnywhere(re.pattern_, str);
+}
+
+// Initializes an RE from its string representation.
+void RE::Init(const char* regex) {
+  pattern_ = full_pattern_ = NULL;
+  if (regex != NULL) {
+    pattern_ = posix::StrDup(regex);
+  }
+
+  is_valid_ = ValidateRegex(regex);
+  if (!is_valid_) {
+    // No need to calculate the full pattern when the regex is invalid.
+    return;
+  }
+
+  const size_t len = strlen(regex);
+  // Reserves enough bytes to hold the regular expression used for a
+  // full match: we need space to prepend a '^', append a '$', and
+  // terminate the string with '\0'.
+  char* buffer = static_cast<char*>(malloc(len + 3));
+  full_pattern_ = buffer;
+
+  if (*regex != '^')
+    *buffer++ = '^';  // Makes sure full_pattern_ starts with '^'.
+
+  // We don't use snprintf or strncpy, as they trigger a warning when
+  // compiled with VC++ 8.0.
+  memcpy(buffer, regex, len);
+  buffer += len;
+
+  if (len == 0 || regex[len - 1] != '$')
+    *buffer++ = '$';  // Makes sure full_pattern_ ends with '$'.
+
+  *buffer = '\0';
+}
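+
+// Examples of the pattern wrapping above (sketch):
+//   Init("abc")   =>  pattern_ == "abc",   full_pattern_ == "^abc$"
+//   Init("^a.$")  =>  pattern_ == "^a.$",  full_pattern_ == "^a.$"
+// FullMatch() then runs MatchRegexAnywhere() on the anchored full_pattern_,
+// while PartialMatch() uses the unanchored pattern_.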
+
+#endif  // GTEST_USES_POSIX_RE
+
+const char kUnknownFile[] = "unknown file";
+
+// Formats a source file path and a line number as they would appear
+// in an error message from the compiler used to compile this code.
+GTEST_API_ ::std::string FormatFileLocation(const char* file, int line) {
+  const std::string file_name(file == NULL ? kUnknownFile : file);
+
+  if (line < 0) {
+    return file_name + ":";
+  }
+#ifdef _MSC_VER
+  return file_name + "(" + StreamableToString(line) + "):";
+#else
+  return file_name + ":" + StreamableToString(line) + ":";
+#endif  // _MSC_VER
+}
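+
+// Examples (sketch): FormatFileLocation("foo.cc", 42) yields "foo.cc(42):"
+// when built with MSVC and "foo.cc:42:" elsewhere; FormatFileLocation(
+// "foo.cc", -1) yields "foo.cc:"; a NULL file name is reported as
+// "unknown file".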
+
+// Formats a file location for compiler-independent XML output.
+// Although this function is not platform dependent, we put it next to
+// FormatFileLocation in order to contrast the two functions.
+// Note that FormatCompilerIndependentFileLocation() does NOT append a colon
+// to the file location it produces, unlike FormatFileLocation().
+GTEST_API_ ::std::string FormatCompilerIndependentFileLocation(
+    const char* file, int line) {
+  const std::string file_name(file == NULL ? kUnknownFile : file);
+
+  if (line < 0)
+    return file_name;
+  else
+    return file_name + ":" + StreamableToString(line);
+}
+
+GTestLog::GTestLog(GTestLogSeverity severity, const char* file, int line)
+    : severity_(severity) {
+  const char* const marker =
+      severity == GTEST_INFO ?    "[  INFO ]" :
+      severity == GTEST_WARNING ? "[WARNING]" :
+      severity == GTEST_ERROR ?   "[ ERROR ]" : "[ FATAL ]";
+  GetStream() << ::std::endl << marker << " "
+              << FormatFileLocation(file, line).c_str() << ": ";
+}
+
+// Flushes the buffers and, if severity is GTEST_FATAL, aborts the program.
+GTestLog::~GTestLog() {
+  GetStream() << ::std::endl;
+  if (severity_ == GTEST_FATAL) {
+    fflush(stderr);
+    posix::Abort();
+  }
+}
+// Disable Microsoft deprecation warnings for POSIX functions called from
+// this class (creat, dup, dup2, and close)
+GTEST_DISABLE_MSC_WARNINGS_PUSH_(4996)
+
+#if GTEST_HAS_STREAM_REDIRECTION
+
+// Object that captures an output stream (stdout/stderr).
+class CapturedStream {
+ public:
+  // The ctor redirects the stream to a temporary file.
+  explicit CapturedStream(int fd) : fd_(fd), uncaptured_fd_(dup(fd)) {
+# if GTEST_OS_WINDOWS
+    char temp_dir_path[MAX_PATH + 1] = { '\0' };  // NOLINT
+    char temp_file_path[MAX_PATH + 1] = { '\0' };  // NOLINT
+
+    ::GetTempPathA(sizeof(temp_dir_path), temp_dir_path);
+    const UINT success = ::GetTempFileNameA(temp_dir_path,
+                                            "gtest_redir",
+                                            0,  // Generate unique file name.
+                                            temp_file_path);
+    GTEST_CHECK_(success != 0)
+        << "Unable to create a temporary file in " << temp_dir_path;
+    const int captured_fd = creat(temp_file_path, _S_IREAD | _S_IWRITE);
+    GTEST_CHECK_(captured_fd != -1) << "Unable to open temporary file "
+                                    << temp_file_path;
+    filename_ = temp_file_path;
+# else
+    // There's no guarantee that a test has write access to the current
+    // directory, so we create the temporary file in the /tmp directory
+    // instead. We use /tmp on most systems, and /sdcard on Android.
+    // That's because Android doesn't have /tmp.
+#  if GTEST_OS_LINUX_ANDROID
+    // Note: Android applications are expected to call the framework's
+    // Context.getExternalStorageDirectory() method through JNI to get
+    // the location of the world-writable SD Card directory. However,
+    // this requires a Context handle, which cannot be retrieved
+    // globally from native code. Doing so also precludes running the
+    // code as part of a regular standalone executable, which doesn't
+    // run in a Dalvik process (e.g. when running it through 'adb shell').
+    //
+    // The location /sdcard is directly accessible from native code
+    // and is the only location (unofficially) supported by the Android
+    // team. It's generally a symlink to the real SD Card mount point
+    // which can be /mnt/sdcard, /mnt/sdcard0, /system/media/sdcard, or
+    // other OEM-customized locations. Never rely on these, and always
+    // use /sdcard.
+    char name_template[] = "/sdcard/gtest_captured_stream.XXXXXX";
+#  else
+    char name_template[] = "/tmp/captured_stream.XXXXXX";
+#  endif  // GTEST_OS_LINUX_ANDROID
+    const int captured_fd = mkstemp(name_template);
+    filename_ = name_template;
+# endif  // GTEST_OS_WINDOWS
+    fflush(NULL);
+    dup2(captured_fd, fd_);
+    close(captured_fd);
+  }
+
+  ~CapturedStream() {
+    remove(filename_.c_str());
+  }
+
+  std::string GetCapturedString() {
+    if (uncaptured_fd_ != -1) {
+      // Restores the original stream.
+      fflush(NULL);
+      dup2(uncaptured_fd_, fd_);
+      close(uncaptured_fd_);
+      uncaptured_fd_ = -1;
+    }
+
+    FILE* const file = posix::FOpen(filename_.c_str(), "r");
+    const std::string content = ReadEntireFile(file);
+    posix::FClose(file);
+    return content;
+  }
+
+ private:
+  const int fd_;  // A stream to capture.
+  int uncaptured_fd_;
+  // Name of the temporary file holding the captured output.
+  ::std::string filename_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(CapturedStream);
+};
+
+GTEST_DISABLE_MSC_WARNINGS_POP_()
+
+static CapturedStream* g_captured_stderr = NULL;
+static CapturedStream* g_captured_stdout = NULL;
+
+// Starts capturing an output stream (stdout/stderr).
+void CaptureStream(int fd, const char* stream_name, CapturedStream** stream) {
+  if (*stream != NULL) {
+    GTEST_LOG_(FATAL) << "Only one " << stream_name
+                      << " capturer can exist at a time.";
+  }
+  *stream = new CapturedStream(fd);
+}
+
+// Stops capturing the output stream and returns the captured string.
+std::string GetCapturedStream(CapturedStream** captured_stream) {
+  const std::string content = (*captured_stream)->GetCapturedString();
+
+  delete *captured_stream;
+  *captured_stream = NULL;
+
+  return content;
+}
+
+// Starts capturing stdout.
+void CaptureStdout() {
+  CaptureStream(kStdOutFileno, "stdout", &g_captured_stdout);
+}
+
+// Starts capturing stderr.
+void CaptureStderr() {
+  CaptureStream(kStdErrFileno, "stderr", &g_captured_stderr);
+}
+
+// Stops capturing stdout and returns the captured string.
+std::string GetCapturedStdout() {
+  return GetCapturedStream(&g_captured_stdout);
+}
+
+// Stops capturing stderr and returns the captured string.
+std::string GetCapturedStderr() {
+  return GetCapturedStream(&g_captured_stderr);
+}
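+
+// Typical use of the capture API above (sketch):
+//
+//   CaptureStdout();
+//   printf("hello\n");
+//   const std::string out = GetCapturedStdout();  // out == "hello\n"
+//
+// Only one capturer per stream may be active at a time; calling
+// CaptureStdout() again before GetCapturedStdout() aborts with a fatal log.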
+
+#endif  // GTEST_HAS_STREAM_REDIRECTION
+
+std::string TempDir() {
+#if GTEST_OS_WINDOWS_MOBILE
+  return "\\temp\\";
+#elif GTEST_OS_WINDOWS
+  const char* temp_dir = posix::GetEnv("TEMP");
+  if (temp_dir == NULL || temp_dir[0] == '\0')
+    return "\\temp\\";
+  else if (temp_dir[strlen(temp_dir) - 1] == '\\')
+    return temp_dir;
+  else
+    return std::string(temp_dir) + "\\";
+#elif GTEST_OS_LINUX_ANDROID
+  return "/sdcard/";
+#else
+  return "/tmp/";
+#endif  // GTEST_OS_WINDOWS_MOBILE
+}
+
+size_t GetFileSize(FILE* file) {
+  fseek(file, 0, SEEK_END);
+  return static_cast<size_t>(ftell(file));
+}
+
+std::string ReadEntireFile(FILE* file) {
+  const size_t file_size = GetFileSize(file);
+  char* const buffer = new char[file_size];
+
+  size_t bytes_last_read = 0;  // # of bytes read in the last fread()
+  size_t bytes_read = 0;       // # of bytes read so far
+
+  fseek(file, 0, SEEK_SET);
+
+  // Keeps reading the file until we cannot read further or the
+  // pre-determined file size is reached.
+  do {
+    bytes_last_read = fread(buffer+bytes_read, 1, file_size-bytes_read, file);
+    bytes_read += bytes_last_read;
+  } while (bytes_last_read > 0 && bytes_read < file_size);
+
+  const std::string content(buffer, bytes_read);
+  delete[] buffer;
+
+  return content;
+}
+
+#if GTEST_HAS_DEATH_TEST
+
+static const ::std::vector<testing::internal::string>* g_injected_test_argvs =
+                                        NULL;  // Owned.
+
+void SetInjectableArgvs(const ::std::vector<testing::internal::string>* argvs) {
+  if (g_injected_test_argvs != argvs)
+    delete g_injected_test_argvs;
+  g_injected_test_argvs = argvs;
+}
+
+const ::std::vector<testing::internal::string>& GetInjectableArgvs() {
+  if (g_injected_test_argvs != NULL) {
+    return *g_injected_test_argvs;
+  }
+  return GetArgvs();
+}
+#endif  // GTEST_HAS_DEATH_TEST
+
+#if GTEST_OS_WINDOWS_MOBILE
+namespace posix {
+void Abort() {
+  DebugBreak();
+  TerminateProcess(GetCurrentProcess(), 1);
+}
+}  // namespace posix
+#endif  // GTEST_OS_WINDOWS_MOBILE
+
+// Returns the name of the environment variable corresponding to the
+// given flag.  For example, FlagToEnvVar("foo") will return
+// "GTEST_FOO" in the open-source version.
+static std::string FlagToEnvVar(const char* flag) {
+  const std::string full_flag =
+      (Message() << GTEST_FLAG_PREFIX_ << flag).GetString();
+
+  Message env_var;
+  for (size_t i = 0; i != full_flag.length(); i++) {
+    env_var << ToUpper(full_flag.c_str()[i]);
+  }
+
+  return env_var.GetString();
+}
+
+// Parses 'str' for a 32-bit signed integer.  If successful, writes
+// the result to *value and returns true; otherwise leaves *value
+// unchanged and returns false.
+bool ParseInt32(const Message& src_text, const char* str, Int32* value) {
+  // Parses the environment variable as a decimal integer.
+  char* end = NULL;
+  const long long_value = strtol(str, &end, 10);  // NOLINT
+
+  // Has strtol() consumed all characters in the string?
+  if (*end != '\0') {
+    // No - an invalid character was encountered.
+    Message msg;
+    msg << "WARNING: " << src_text
+        << " is expected to be a 32-bit integer, but actually"
+        << " has value \"" << str << "\".\n";
+    printf("%s", msg.GetString().c_str());
+    fflush(stdout);
+    return false;
+  }
+
+  // Is the parsed value in the range of an Int32?
+  const Int32 result = static_cast<Int32>(long_value);
+  if (long_value == LONG_MAX || long_value == LONG_MIN ||
+      // The parsed value overflows as a long.  (strtol() returns
+      // LONG_MAX or LONG_MIN when the input overflows.)
+      result != long_value
+      // The parsed value overflows as an Int32.
+      ) {
+    Message msg;
+    msg << "WARNING: " << src_text
+        << " is expected to be a 32-bit integer, but actually"
+        << " has value " << str << ", which overflows.\n";
+    printf("%s", msg.GetString().c_str());
+    fflush(stdout);
+    return false;
+  }
+
+  *value = result;
+  return true;
+}
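+
+// Examples of the parsing rules above (sketch):
+//   Int32 v = 0;
+//   ParseInt32(Message() << "flag", "123", &v);         // true,  v == 123.
+//   ParseInt32(Message() << "flag", "12a", &v);         // false - trailing
+//                                                       //         garbage.
+//   ParseInt32(Message() << "flag", "9999999999", &v);  // false - overflow.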
+
+// Reads and returns the Boolean environment variable corresponding to
+// the given flag; if it's not set, returns default_value.
+//
+// The value is considered true iff it's not "0".
+bool BoolFromGTestEnv(const char* flag, bool default_value) {
+#if defined(GTEST_GET_BOOL_FROM_ENV_)
+  return GTEST_GET_BOOL_FROM_ENV_(flag, default_value);
+#endif  // defined(GTEST_GET_BOOL_FROM_ENV_)
+  const std::string env_var = FlagToEnvVar(flag);
+  const char* const string_value = posix::GetEnv(env_var.c_str());
+  return string_value == NULL ?
+      default_value : strcmp(string_value, "0") != 0;
+}
+
+// Reads and returns a 32-bit integer stored in the environment
+// variable corresponding to the given flag; if it isn't set or
+// doesn't represent a valid 32-bit integer, returns default_value.
+Int32 Int32FromGTestEnv(const char* flag, Int32 default_value) {
+#if defined(GTEST_GET_INT32_FROM_ENV_)
+  return GTEST_GET_INT32_FROM_ENV_(flag, default_value);
+#endif  // defined(GTEST_GET_INT32_FROM_ENV_)
+  const std::string env_var = FlagToEnvVar(flag);
+  const char* const string_value = posix::GetEnv(env_var.c_str());
+  if (string_value == NULL) {
+    // The environment variable is not set.
+    return default_value;
+  }
+
+  Int32 result = default_value;
+  if (!ParseInt32(Message() << "Environment variable " << env_var,
+                  string_value, &result)) {
+    printf("The default value %s is used.\n",
+           (Message() << default_value).GetString().c_str());
+    fflush(stdout);
+    return default_value;
+  }
+
+  return result;
+}
+
+// Reads and returns the string environment variable corresponding to
+// the given flag; if it's not set, returns default_value.
+std::string StringFromGTestEnv(const char* flag, const char* default_value) {
+#if defined(GTEST_GET_STRING_FROM_ENV_)
+  return GTEST_GET_STRING_FROM_ENV_(flag, default_value);
+#endif  // defined(GTEST_GET_STRING_FROM_ENV_)
+  const std::string env_var = FlagToEnvVar(flag);
+  const char* value = posix::GetEnv(env_var.c_str());
+  if (value != NULL) {
+    return value;
+  }
+
+  // As a special case for the 'output' flag, if GTEST_OUTPUT is not
+  // set, we look for XML_OUTPUT_FILE, which is set by the Bazel build
+  // system.  The value of XML_OUTPUT_FILE is a filename without the
+  // "xml:" prefix of GTEST_OUTPUT.
+  //
+  // The net priority order after flag processing is thus:
+  //   --gtest_output command line flag
+  //   GTEST_OUTPUT environment variable
+  //   XML_OUTPUT_FILE environment variable
+  //   'default_value'
+  if (strcmp(flag, "output") == 0) {
+    value = posix::GetEnv("XML_OUTPUT_FILE");
+    if (value != NULL) {
+      return std::string("xml:") + value;
+    }
+  }
+  return default_value;
+}
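+
+// Example (sketch): with GTEST_OUTPUT unset and XML_OUTPUT_FILE set to
+// "/tmp/results.xml", StringFromGTestEnv("output", "") returns
+// "xml:/tmp/results.xml".  If GTEST_OUTPUT is set it takes precedence, and
+// the --gtest_output command line flag overrides both.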
+
+}  // namespace internal
+}  // namespace testing
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Test - The Google C++ Testing Framework
+//
+// This file implements a universal value printer that can print a
+// value of any type T:
+//
+//   void ::testing::internal::UniversalPrinter<T>::Print(value, ostream_ptr);
+//
+// It uses the << operator when possible, and prints the bytes in the
+// object otherwise.  A user can override its behavior for a class
+// type Foo by defining either operator<<(::std::ostream&, const Foo&)
+// or void PrintTo(const Foo&, ::std::ostream*) in the namespace that
+// defines Foo.
+
+#include <ctype.h>
+#include <stdio.h>
+#include <cwchar>
+#include <ostream>  // NOLINT
+#include <string>
+
+namespace testing {
+
+namespace {
+
+using ::std::ostream;
+
+// Prints a segment of bytes in the given object.
+GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_
+GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_
+GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_
+void PrintByteSegmentInObjectTo(const unsigned char* obj_bytes, size_t start,
+                                size_t count, ostream* os) {
+  char text[5] = "";
+  for (size_t i = 0; i != count; i++) {
+    const size_t j = start + i;
+    if (i != 0) {
+      // Organizes the bytes into groups of 2 for easy parsing by
+      // a human reader.
+      if ((j % 2) == 0)
+        *os << ' ';
+      else
+        *os << '-';
+    }
+    GTEST_SNPRINTF_(text, sizeof(text), "%02X", obj_bytes[j]);
+    *os << text;
+  }
+}
+
+// Prints the bytes in the given value to the given ostream.
+void PrintBytesInObjectToImpl(const unsigned char* obj_bytes, size_t count,
+                              ostream* os) {
+  // Tells the user how big the object is.
+  *os << count << "-byte object <";
+
+  const size_t kThreshold = 132;
+  const size_t kChunkSize = 64;
+  // If the object size is bigger than kThreshold, we'll have to omit
+  // some details by printing only the first and the last kChunkSize
+  // bytes.
+  // TODO(wan): let the user control the threshold using a flag.
+  if (count < kThreshold) {
+    PrintByteSegmentInObjectTo(obj_bytes, 0, count, os);
+  } else {
+    PrintByteSegmentInObjectTo(obj_bytes, 0, kChunkSize, os);
+    *os << " ... ";
+    // Rounds up to 2-byte boundary.
+    const size_t resume_pos = (count - kChunkSize + 1)/2*2;
+    PrintByteSegmentInObjectTo(obj_bytes, resume_pos, count - resume_pos, os);
+  }
+  *os << ">";
+}
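+
+// Example of the format produced above (sketch): a 6-byte object whose bytes
+// are DE AD BE EF 01 02 is printed as
+//
+//   6-byte object <DE-AD BE-EF 01-02>
+//
+// Objects of kThreshold (132) bytes or more print only the first kChunkSize
+// (64) bytes, an ellipsis, and the tail starting at a 2-byte-aligned offset.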
+
+}  // namespace
+
+namespace internal2 {
+
+// Delegates to PrintBytesInObjectToImpl() to print the bytes in the
+// given object.  The delegation simplifies the implementation, which
+// uses the << operator and thus is easier done outside of the
+// ::testing::internal namespace, which contains a << operator that
+// sometimes conflicts with the one in STL.
+void PrintBytesInObjectTo(const unsigned char* obj_bytes, size_t count,
+                          ostream* os) {
+  PrintBytesInObjectToImpl(obj_bytes, count, os);
+}
+
+}  // namespace internal2
+
+namespace internal {
+
+// Depending on the value of a char (or wchar_t), we print it in one
+// of three formats:
+//   - as is if it's a printable ASCII (e.g. 'a', '2', ' '),
+//   - as a hexadecimal escape sequence (e.g. '\x7F'), or
+//   - as a special escape sequence (e.g. '\r', '\n').
+enum CharFormat {
+  kAsIs,
+  kHexEscape,
+  kSpecialEscape
+};
+
+// Returns true if c is a printable ASCII character.  We test the
+// value of c directly instead of calling isprint(), which is buggy on
+// Windows Mobile.
+inline bool IsPrintableAscii(wchar_t c) {
+  return 0x20 <= c && c <= 0x7E;
+}
+
+// Prints a wide or narrow char c as a character literal without the
+// quotes, escaping it when necessary; returns how c was formatted.
+// The template argument UnsignedChar is the unsigned version of Char,
+// which is the type of c.
+template <typename UnsignedChar, typename Char>
+static CharFormat PrintAsCharLiteralTo(Char c, ostream* os) {
+  switch (static_cast<wchar_t>(c)) {
+    case L'\0':
+      *os << "\\0";
+      break;
+    case L'\'':
+      *os << "\\'";
+      break;
+    case L'\\':
+      *os << "\\\\";
+      break;
+    case L'\a':
+      *os << "\\a";
+      break;
+    case L'\b':
+      *os << "\\b";
+      break;
+    case L'\f':
+      *os << "\\f";
+      break;
+    case L'\n':
+      *os << "\\n";
+      break;
+    case L'\r':
+      *os << "\\r";
+      break;
+    case L'\t':
+      *os << "\\t";
+      break;
+    case L'\v':
+      *os << "\\v";
+      break;
+    default:
+      if (IsPrintableAscii(c)) {
+        *os << static_cast<char>(c);
+        return kAsIs;
+      } else {
+        *os << "\\x" + String::FormatHexInt(static_cast<UnsignedChar>(c));
+        return kHexEscape;
+      }
+  }
+  return kSpecialEscape;
+}
+
+// Prints a wchar_t c as if it's part of a string literal, escaping it when
+// necessary; returns how c was formatted.
+static CharFormat PrintAsStringLiteralTo(wchar_t c, ostream* os) {
+  switch (c) {
+    case L'\'':
+      *os << "'";
+      return kAsIs;
+    case L'"':
+      *os << "\\\"";
+      return kSpecialEscape;
+    default:
+      return PrintAsCharLiteralTo<wchar_t>(c, os);
+  }
+}
+
+// Prints a char c as if it's part of a string literal, escaping it when
+// necessary; returns how c was formatted.
+static CharFormat PrintAsStringLiteralTo(char c, ostream* os) {
+  return PrintAsStringLiteralTo(
+      static_cast<wchar_t>(static_cast<unsigned char>(c)), os);
+}
+
+// Prints a wide or narrow character c and its code.  '\0' is printed
+// as "'\\0'", other unprintable characters are also properly escaped
+// using the standard C++ escape sequence.  The template argument
+// UnsignedChar is the unsigned version of Char, which is the type of c.
+template <typename UnsignedChar, typename Char>
+void PrintCharAndCodeTo(Char c, ostream* os) {
+  // First, print c as a literal in the most readable form we can find.
+  *os << ((sizeof(c) > 1) ? "L'" : "'");
+  const CharFormat format = PrintAsCharLiteralTo<UnsignedChar>(c, os);
+  *os << "'";
+
+  // To aid user debugging, we also print c's code in decimal, unless
+  // it's 0 (in which case c was printed as '\\0', making the code
+  // obvious).
+  if (c == 0)
+    return;
+  *os << " (" << static_cast<int>(c);
+
+  // For more convenience, we print c's code again in hexadecimal,
+  // unless c was already printed in the form '\x##' or the code is in
+  // [1, 9].
+  if (format == kHexEscape || (1 <= c && c <= 9)) {
+    // Do nothing.
+  } else {
+    *os << ", 0x" << String::FormatHexInt(static_cast<UnsignedChar>(c));
+  }
+  *os << ")";
+}
+
+void PrintTo(unsigned char c, ::std::ostream* os) {
+  PrintCharAndCodeTo<unsigned char>(c, os);
+}
+void PrintTo(signed char c, ::std::ostream* os) {
+  PrintCharAndCodeTo<unsigned char>(c, os);
+}
+
+// Prints a wchar_t as a symbol if it is printable or as its internal
+// code otherwise and also as its code.  L'\0' is printed as "L'\\0'".
+void PrintTo(wchar_t wc, ostream* os) {
+  PrintCharAndCodeTo<wchar_t>(wc, os);
+}
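+
+// Examples of the output produced by the overloads above (sketch, where os
+// is any std::ostream):
+//   PrintTo(static_cast<unsigned char>('a'), &os);   // "'a' (97, 0x61)"
+//   PrintTo(L'\n', &os);                             // "L'\n' (10, 0xA)"
+//   PrintTo(static_cast<unsigned char>('\0'), &os);  // "'\0'"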
+
+// Prints the given array of characters to the ostream.  CharType must be either
+// char or wchar_t.
+// The array starts at begin, the length is len, it may include '\0' characters
+// and may not be NUL-terminated.
+template <typename CharType>
+GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_
+GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_
+GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_
+static void PrintCharsAsStringTo(
+    const CharType* begin, size_t len, ostream* os) {
+  const char* const kQuoteBegin = sizeof(CharType) == 1 ? "\"" : "L\"";
+  *os << kQuoteBegin;
+  bool is_previous_hex = false;
+  for (size_t index = 0; index < len; ++index) {
+    const CharType cur = begin[index];
+    if (is_previous_hex && IsXDigit(cur)) {
+      // Previous character is of '\x..' form and this character can be
+      // interpreted as another hexadecimal digit in its number. Break string to
+      // disambiguate.
+      *os << "\" " << kQuoteBegin;
+    }
+    is_previous_hex = PrintAsStringLiteralTo(cur, os) == kHexEscape;
+  }
+  *os << "\"";
+}
+
+// Prints a (const) char/wchar_t array of 'len' elements, starting at address
+// 'begin'.  CharType must be either char or wchar_t.
+template <typename CharType>
+GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_
+GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_
+GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_
+static void UniversalPrintCharArray(
+    const CharType* begin, size_t len, ostream* os) {
+  // The code
+  //   const char kFoo[] = "foo";
+  // generates an array of 4, not 3, elements, with the last one being '\0'.
+  //
+  // Therefore when printing a char array, we don't print the last element if
+  // it's '\0', such that the output matches the string literal as it's
+  // written in the source code.
+  if (len > 0 && begin[len - 1] == '\0') {
+    PrintCharsAsStringTo(begin, len - 1, os);
+    return;
+  }
+
+  // If, however, the last element in the array is not '\0', e.g.
+  //    const char kFoo[] = { 'f', 'o', 'o' };
+  // we must print the entire array.  We also print a message to indicate
+  // that the array is not NUL-terminated.
+  PrintCharsAsStringTo(begin, len, os);
+  *os << " (no terminating NUL)";
+}
+
+// Prints a (const) char array of 'len' elements, starting at address 'begin'.
+void UniversalPrintArray(const char* begin, size_t len, ostream* os) {
+  UniversalPrintCharArray(begin, len, os);
+}
+
+// Prints a (const) wchar_t array of 'len' elements, starting at address
+// 'begin'.
+void UniversalPrintArray(const wchar_t* begin, size_t len, ostream* os) {
+  UniversalPrintCharArray(begin, len, os);
+}
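+
+// Example (sketch): given
+//   const char kFoo[] = "foo";            // 4 elements, the last is '\0'.
+//   const char kBar[] = {'f', 'o', 'o'};  // 3 elements, not NUL-terminated.
+// UniversalPrintArray(kFoo, 4, &os) prints the quoted literal "foo" (the
+// trailing '\0' is dropped), while UniversalPrintArray(kBar, 3, &os) prints
+// "foo" followed by " (no terminating NUL)".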
+
+// Prints the given C string to the ostream.
+void PrintTo(const char* s, ostream* os) {
+  if (s == NULL) {
+    *os << "NULL";
+  } else {
+    *os << ImplicitCast_<const void*>(s) << " pointing to ";
+    PrintCharsAsStringTo(s, strlen(s), os);
+  }
+}
+
+// The MSVC compiler can be configured to define wchar_t as a typedef
+// of unsigned short. Defining an overload for const wchar_t* in that case
+// would cause pointers to unsigned shorts to be printed as wide strings,
+// possibly accessing more memory than intended and causing invalid
+// memory accesses. MSVC defines the _NATIVE_WCHAR_T_DEFINED symbol when
+// wchar_t is implemented as a native type.
+#if !defined(_MSC_VER) || defined(_NATIVE_WCHAR_T_DEFINED)
+// Prints the given wide C string to the ostream.
+void PrintTo(const wchar_t* s, ostream* os) {
+  if (s == NULL) {
+    *os << "NULL";
+  } else {
+    *os << ImplicitCast_<const void*>(s) << " pointing to ";
+    PrintCharsAsStringTo(s, std::wcslen(s), os);
+  }
+}
+#endif  // wchar_t is native
+
+// Prints a ::string object.
+#if GTEST_HAS_GLOBAL_STRING
+void PrintStringTo(const ::string& s, ostream* os) {
+  PrintCharsAsStringTo(s.data(), s.size(), os);
+}
+#endif  // GTEST_HAS_GLOBAL_STRING
+
+void PrintStringTo(const ::std::string& s, ostream* os) {
+  PrintCharsAsStringTo(s.data(), s.size(), os);
+}
+
+// Prints a ::wstring object.
+#if GTEST_HAS_GLOBAL_WSTRING
+void PrintWideStringTo(const ::wstring& s, ostream* os) {
+  PrintCharsAsStringTo(s.data(), s.size(), os);
+}
+#endif  // GTEST_HAS_GLOBAL_WSTRING
+
+#if GTEST_HAS_STD_WSTRING
+void PrintWideStringTo(const ::std::wstring& s, ostream* os) {
+  PrintCharsAsStringTo(s.data(), s.size(), os);
+}
+#endif  // GTEST_HAS_STD_WSTRING
+
+}  // namespace internal
+
+}  // namespace testing
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: mheule@google.com (Markus Heule)
+//
+// The Google C++ Testing Framework (Google Test)
+
+
+// Indicates that this translation unit is part of Google Test's
+// implementation.  It must come before gtest-internal-inl.h is
+// included, or there will be a compiler error.  This trick exists to
+// prevent the accidental inclusion of gtest-internal-inl.h in the
+// user's code.
+#define GTEST_IMPLEMENTATION_ 1
+#undef GTEST_IMPLEMENTATION_
+
+namespace testing {
+
+using internal::GetUnitTestImpl;
+
+// Gets the summary of the failure message by omitting the stack trace
+// in it.
+std::string TestPartResult::ExtractSummary(const char* message) {
+  const char* const stack_trace = strstr(message, internal::kStackTraceMarker);
+  return stack_trace == NULL ? message :
+      std::string(message, stack_trace);
+}
+
+// Prints a TestPartResult object.
+std::ostream& operator<<(std::ostream& os, const TestPartResult& result) {
+  return os
+      << result.file_name() << ":" << result.line_number() << ": "
+      << (result.type() == TestPartResult::kSuccess ? "Success" :
+          result.type() == TestPartResult::kFatalFailure ? "Fatal failure" :
+          "Non-fatal failure") << ":\n"
+      << result.message() << std::endl;
+}
+
+// Appends a TestPartResult to the array.
+void TestPartResultArray::Append(const TestPartResult& result) {
+  array_.push_back(result);
+}
+
+// Returns the TestPartResult at the given index (0-based).
+const TestPartResult& TestPartResultArray::GetTestPartResult(int index) const {
+  if (index < 0 || index >= size()) {
+    printf("\nInvalid index (%d) into TestPartResultArray.\n", index);
+    internal::posix::Abort();
+  }
+
+  return array_[index];
+}
+
+// Returns the number of TestPartResult objects in the array.
+int TestPartResultArray::size() const {
+  return static_cast<int>(array_.size());
+}
+
+namespace internal {
+
+HasNewFatalFailureHelper::HasNewFatalFailureHelper()
+    : has_new_fatal_failure_(false),
+      original_reporter_(GetUnitTestImpl()->
+                         GetTestPartResultReporterForCurrentThread()) {
+  GetUnitTestImpl()->SetTestPartResultReporterForCurrentThread(this);
+}
+
+HasNewFatalFailureHelper::~HasNewFatalFailureHelper() {
+  GetUnitTestImpl()->SetTestPartResultReporterForCurrentThread(
+      original_reporter_);
+}
+
+void HasNewFatalFailureHelper::ReportTestPartResult(
+    const TestPartResult& result) {
+  if (result.fatally_failed())
+    has_new_fatal_failure_ = true;
+  original_reporter_->ReportTestPartResult(result);
+}
+
+}  // namespace internal
+
+}  // namespace testing
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+
+namespace testing {
+namespace internal {
+
+#if GTEST_HAS_TYPED_TEST_P
+
+// Skips to the first non-space char in str. Returns an empty string if str
+// contains only whitespace characters.
+static const char* SkipSpaces(const char* str) {
+  while (IsSpace(*str))
+    str++;
+  return str;
+}
+
+static std::vector<std::string> SplitIntoTestNames(const char* src) {
+  std::vector<std::string> name_vec;
+  src = SkipSpaces(src);
+  for (; src != NULL; src = SkipComma(src)) {
+    name_vec.push_back(StripTrailingSpaces(GetPrefixUntilComma(src)));
+  }
+  return name_vec;
+}
+
+// Verifies that registered_tests match the test names in
+// registered_tests_; returns registered_tests if successful, or
+// aborts the program otherwise.
+const char* TypedTestCasePState::VerifyRegisteredTestNames(
+    const char* file, int line, const char* registered_tests) {
+  typedef RegisteredTestsMap::const_iterator RegisteredTestIter;
+  registered_ = true;
+
+  std::vector<std::string> name_vec = SplitIntoTestNames(registered_tests);
+
+  Message errors;
+
+  std::set<std::string> tests;
+  for (std::vector<std::string>::const_iterator name_it = name_vec.begin();
+       name_it != name_vec.end(); ++name_it) {
+    const std::string& name = *name_it;
+    if (tests.count(name) != 0) {
+      errors << "Test " << name << " is listed more than once.\n";
+      continue;
+    }
+
+    bool found = false;
+    for (RegisteredTestIter it = registered_tests_.begin();
+         it != registered_tests_.end();
+         ++it) {
+      if (name == it->first) {
+        found = true;
+        break;
+      }
+    }
+
+    if (found) {
+      tests.insert(name);
+    } else {
+      errors << "No test named " << name
+             << " can be found in this test case.\n";
+    }
+  }
+
+  for (RegisteredTestIter it = registered_tests_.begin();
+       it != registered_tests_.end();
+       ++it) {
+    if (tests.count(it->first) == 0) {
+      errors << "You forgot to list test " << it->first << ".\n";
+    }
+  }
+
+  const std::string& errors_str = errors.GetString();
+  if (errors_str != "") {
+    fprintf(stderr, "%s %s", FormatFileLocation(file, line).c_str(),
+            errors_str.c_str());
+    fflush(stderr);
+    posix::Abort();
+  }
+
+  return registered_tests;
+}
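+
+// Example (sketch): for
+//   REGISTER_TYPED_TEST_CASE_P(FooTest, DoesA, DoesB);
+// registered_tests is the literal string "DoesA, DoesB".  The checks above
+// report a name listed twice, a listed name with no matching TYPED_TEST_P
+// definition, and a defined TYPED_TEST_P missing from the list, then abort
+// via posix::Abort() if any error was found.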
+
+#endif  // GTEST_HAS_TYPED_TEST_P
+
+}  // namespace internal
+}  // namespace testing
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// Google C++ Mocking Framework (Google Mock)
+//
+// This file #includes all Google Mock implementation .cc files.  The
+// purpose is to allow a user to build Google Mock by compiling this
+// file alone.
+
+// This line ensures that gmock.h can be compiled on its own, even
+// when it's fused.
+#include "gmock/gmock.h"
+
+// The following lines pull in the real gmock *.cc files.
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Mock - a framework for writing C++ mock classes.
+//
+// This file implements cardinalities.
+
+
+#include <limits.h>
+#include <ostream>  // NOLINT
+#include <sstream>
+#include <string>
+
+namespace testing {
+
+namespace {
+
+// Implements the Between(m, n) cardinality.
+class BetweenCardinalityImpl : public CardinalityInterface {
+ public:
+  BetweenCardinalityImpl(int min, int max)
+      : min_(min >= 0 ? min : 0),
+        max_(max >= min_ ? max : min_) {
+    std::stringstream ss;
+    if (min < 0) {
+      ss << "The invocation lower bound must be >= 0, "
+         << "but is actually " << min << ".";
+      internal::Expect(false, __FILE__, __LINE__, ss.str());
+    } else if (max < 0) {
+      ss << "The invocation upper bound must be >= 0, "
+         << "but is actually " << max << ".";
+      internal::Expect(false, __FILE__, __LINE__, ss.str());
+    } else if (min > max) {
+      ss << "The invocation upper bound (" << max
+         << ") must be >= the invocation lower bound (" << min
+         << ").";
+      internal::Expect(false, __FILE__, __LINE__, ss.str());
+    }
+  }
+
+  // Conservative estimate on the lower/upper bound of the number of
+  // calls allowed.
+  virtual int ConservativeLowerBound() const { return min_; }
+  virtual int ConservativeUpperBound() const { return max_; }
+
+  virtual bool IsSatisfiedByCallCount(int call_count) const {
+    return min_ <= call_count && call_count <= max_;
+  }
+
+  virtual bool IsSaturatedByCallCount(int call_count) const {
+    return call_count >= max_;
+  }
+
+  virtual void DescribeTo(::std::ostream* os) const;
+
+ private:
+  const int min_;
+  const int max_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(BetweenCardinalityImpl);
+};
+
+// Formats "n times" in a human-friendly way.
+inline internal::string FormatTimes(int n) {
+  if (n == 1) {
+    return "once";
+  } else if (n == 2) {
+    return "twice";
+  } else {
+    std::stringstream ss;
+    ss << n << " times";
+    return ss.str();
+  }
+}
+
+// Describes the Between(m, n) cardinality in human-friendly text.
+void BetweenCardinalityImpl::DescribeTo(::std::ostream* os) const {
+  if (min_ == 0) {
+    if (max_ == 0) {
+      *os << "never called";
+    } else if (max_ == INT_MAX) {
+      *os << "called any number of times";
+    } else {
+      *os << "called at most " << FormatTimes(max_);
+    }
+  } else if (min_ == max_) {
+    *os << "called " << FormatTimes(min_);
+  } else if (max_ == INT_MAX) {
+    *os << "called at least " << FormatTimes(min_);
+  } else {
+    // 0 < min_ < max_ < INT_MAX
+    *os << "called between " << min_ << " and " << max_ << " times";
+  }
+}
+
+}  // Unnamed namespace
+
+// Describes the given call count to an ostream.
+void Cardinality::DescribeActualCallCountTo(int actual_call_count,
+                                            ::std::ostream* os) {
+  if (actual_call_count > 0) {
+    *os << "called " << FormatTimes(actual_call_count);
+  } else {
+    *os << "never called";
+  }
+}
+
+// Creates a cardinality that allows at least n calls.
+GTEST_API_ Cardinality AtLeast(int n) { return Between(n, INT_MAX); }
+
+// Creates a cardinality that allows at most n calls.
+GTEST_API_ Cardinality AtMost(int n) { return Between(0, n); }
+
+// Creates a cardinality that allows any number of calls.
+GTEST_API_ Cardinality AnyNumber() { return AtLeast(0); }
+
+// Creates a cardinality that allows between min and max calls.
+GTEST_API_ Cardinality Between(int min, int max) {
+  return Cardinality(new BetweenCardinalityImpl(min, max));
+}
+
+// Creates a cardinality that allows exactly n calls.
+GTEST_API_ Cardinality Exactly(int n) { return Between(n, n); }
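+
+// Typical use of these factories (illustrative; the mock object and
+// EXPECT_CALL come from user code):
+//
+//   EXPECT_CALL(mock, Foo()).Times(Between(2, 5));
+//   EXPECT_CALL(mock, Bar()).Times(AtLeast(1));
+//
+// Passing a plain integer n to .Times() is equivalent to Exactly(n).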
+
+}  // namespace testing
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Mock - a framework for writing C++ mock classes.
+//
+// This file defines some utilities useful for implementing Google
+// Mock.  They are subject to change without notice, so please DO NOT
+// USE THEM IN USER CODE.
+
+
+#include <ctype.h>
+#include <ostream>  // NOLINT
+#include <string>
+
+namespace testing {
+namespace internal {
+
+// Converts an identifier name to a space-separated list of lower-case
+// words.  Each maximum substring of the form [A-Za-z][a-z]*|\d+ is
+// treated as one word.  For example, both "FooBar123" and
+// "foo_bar_123" are converted to "foo bar 123".
+GTEST_API_ string ConvertIdentifierNameToWords(const char* id_name) {
+  string result;
+  char prev_char = '\0';
+  for (const char* p = id_name; *p != '\0'; prev_char = *(p++)) {
+    // We don't care about the current locale as the input is
+    // guaranteed to be a valid C++ identifier name.
+    const bool starts_new_word = IsUpper(*p) ||
+        (!IsAlpha(prev_char) && IsLower(*p)) ||
+        (!IsDigit(prev_char) && IsDigit(*p));
+
+    if (IsAlNum(*p)) {
+      if (starts_new_word && result != "")
+        result += ' ';
+      result += ToLower(*p);
+    }
+  }
+  return result;
+}
+
+// This class reports Google Mock failures as Google Test failures.  A
+// user can define another class in a similar fashion if they intend to
+// use Google Mock with a testing framework other than Google Test.
+class GoogleTestFailureReporter : public FailureReporterInterface {
+ public:
+  virtual void ReportFailure(FailureType type, const char* file, int line,
+                             const string& message) {
+    AssertHelper(type == kFatal ?
+                 TestPartResult::kFatalFailure :
+                 TestPartResult::kNonFatalFailure,
+                 file,
+                 line,
+                 message.c_str()) = Message();
+    if (type == kFatal) {
+      posix::Abort();
+    }
+  }
+};
+
+// Returns the global failure reporter.  Will create a
+// GoogleTestFailureReporter and return it the first time called.
+GTEST_API_ FailureReporterInterface* GetFailureReporter() {
+  // Points to the global failure reporter used by Google Mock.  gcc
+  // guarantees that the following use of failure_reporter is
+  // thread-safe.  We may need to add additional synchronization to
+  // protect failure_reporter if we port Google Mock to other
+  // compilers.
+  static FailureReporterInterface* const failure_reporter =
+      new GoogleTestFailureReporter();
+  return failure_reporter;
+}
+
+// Protects global resources (stdout in particular) used by Log().
+static GTEST_DEFINE_STATIC_MUTEX_(g_log_mutex);
+
+// Returns true iff a log with the given severity is visible according
+// to the --gmock_verbose flag.
+GTEST_API_ bool LogIsVisible(LogSeverity severity) {
+  if (GMOCK_FLAG(verbose) == kInfoVerbosity) {
+    // Always show the log if --gmock_verbose=info.
+    return true;
+  } else if (GMOCK_FLAG(verbose) == kErrorVerbosity) {
+    // Always hide it if --gmock_verbose=error.
+    return false;
+  } else {
+    // If --gmock_verbose is neither "info" nor "error", we treat it
+    // as "warning" (its default value).
+    return severity == kWarning;
+  }
+}
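+
+// For illustration, assuming ::testing::InitGoogleMock() has parsed the
+// command line:
+//
+//   ./my_test --gmock_verbose=info    // also show kInfo logs
+//   ./my_test --gmock_verbose=error   // hide kInfo and kWarning logs
+//
+// With the default value ("warning"), only kWarning logs are shown.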
+
+// Prints the given message to stdout iff 'severity' >= the level
+// specified by the --gmock_verbose flag.  If stack_frames_to_skip >=
+// 0, also prints the stack trace excluding the top
+// stack_frames_to_skip frames.  In opt mode, any positive
+// stack_frames_to_skip is treated as 0, since we don't know which
+// function calls will be inlined by the compiler and need to be
+// conservative.
+GTEST_API_ void Log(LogSeverity severity,
+                    const string& message,
+                    int stack_frames_to_skip) {
+  if (!LogIsVisible(severity))
+    return;
+
+  // Ensures that logs from different threads don't interleave.
+  MutexLock l(&g_log_mutex);
+
+  // "using ::std::cout;" doesn't work with Symbian's STLport, where cout is a
+  // macro.
+
+  if (severity == kWarning) {
+    // Prints a GMOCK WARNING marker to make the warnings easily searchable.
+    std::cout << "\nGMOCK WARNING:";
+  }
+  // Pre-pends a new-line to message if it doesn't start with one.
+  if (message.empty() || message[0] != '\n') {
+    std::cout << "\n";
+  }
+  std::cout << message;
+  if (stack_frames_to_skip >= 0) {
+#ifdef NDEBUG
+    // In opt mode, we have to be conservative and skip no stack frame.
+    const int actual_to_skip = 0;
+#else
+    // In dbg mode, we can do what the caller tells us to do (plus one
+    // for skipping this function's stack frame).
+    const int actual_to_skip = stack_frames_to_skip + 1;
+#endif  // NDEBUG
+
+    // Appends a new-line to message if it doesn't end with one.
+    if (!message.empty() && *message.rbegin() != '\n') {
+      std::cout << "\n";
+    }
+    std::cout << "Stack trace:\n"
+         << ::testing::internal::GetCurrentOsStackTraceExceptTop(
+             ::testing::UnitTest::GetInstance(), actual_to_skip);
+  }
+  std::cout << ::std::flush;
+}
+
+}  // namespace internal
+}  // namespace testing
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Mock - a framework for writing C++ mock classes.
+//
+// This file implements Matcher<const string&>, Matcher<string>, and
+// utilities for defining matchers.
+
+
+#include <string.h>
+#include <sstream>
+#include <string>
+
+namespace testing {
+
+// Constructs a matcher that matches a const string& whose value is
+// equal to s.
+Matcher<const internal::string&>::Matcher(const internal::string& s) {
+  *this = Eq(s);
+}
+
+// Constructs a matcher that matches a const string& whose value is
+// equal to s.
+Matcher<const internal::string&>::Matcher(const char* s) {
+  *this = Eq(internal::string(s));
+}
+
+// Constructs a matcher that matches a string whose value is equal to s.
+Matcher<internal::string>::Matcher(const internal::string& s) { *this = Eq(s); }
+
+// Constructs a matcher that matches a string whose value is equal to s.
+Matcher<internal::string>::Matcher(const char* s) {
+  *this = Eq(internal::string(s));
+}
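+
+// These implicit conversions are what let a bare string literal act as a
+// matcher (illustrative; greeter is a user-defined mock object):
+//
+//   EXPECT_CALL(greeter, Greet("hello"));
+//
+// behaves like EXPECT_CALL(greeter, Greet(Eq(string("hello")))).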
+
+#if GTEST_HAS_STRING_PIECE_
+// Constructs a matcher that matches a const StringPiece& whose value is
+// equal to s.
+Matcher<const StringPiece&>::Matcher(const internal::string& s) {
+  *this = Eq(s);
+}
+
+// Constructs a matcher that matches a const StringPiece& whose value is
+// equal to s.
+Matcher<const StringPiece&>::Matcher(const char* s) {
+  *this = Eq(internal::string(s));
+}
+
+// Constructs a matcher that matches a const StringPiece& whose value is
+// equal to s.
+Matcher<const StringPiece&>::Matcher(StringPiece s) {
+  *this = Eq(s.ToString());
+}
+
+// Constructs a matcher that matches a StringPiece whose value is equal to s.
+Matcher<StringPiece>::Matcher(const internal::string& s) {
+  *this = Eq(s);
+}
+
+// Constructs a matcher that matches a StringPiece whose value is equal to s.
+Matcher<StringPiece>::Matcher(const char* s) {
+  *this = Eq(internal::string(s));
+}
+
+// Constructs a matcher that matches a StringPiece whose value is equal to s.
+Matcher<StringPiece>::Matcher(StringPiece s) {
+  *this = Eq(s.ToString());
+}
+#endif  // GTEST_HAS_STRING_PIECE_
+
+namespace internal {
+
+// Joins a vector of strings as if they are fields of a tuple; returns
+// the joined string.
+GTEST_API_ string JoinAsTuple(const Strings& fields) {
+  switch (fields.size()) {
+    case 0:
+      return "";
+    case 1:
+      return fields[0];
+    default:
+      string result = "(" + fields[0];
+      for (size_t i = 1; i < fields.size(); i++) {
+        result += ", ";
+        result += fields[i];
+      }
+      result += ")";
+      return result;
+  }
+}
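+
+// For example: an empty vector of fields yields "", the single field "5"
+// yields "5", and the fields "a", "b", "c" yield "(a, b, c)".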
+
+// Returns the description for a matcher defined using the MATCHER*()
+// macro where the user-supplied description string is "", if
+// 'negation' is false; otherwise returns the description of the
+// negation of the matcher.  'param_values' contains a list of strings
+// that are the print-out of the matcher's parameters.
+GTEST_API_ string FormatMatcherDescription(bool negation,
+                                           const char* matcher_name,
+                                           const Strings& param_values) {
+  string result = ConvertIdentifierNameToWords(matcher_name);
+  if (param_values.size() >= 1)
+    result += " " + JoinAsTuple(param_values);
+  return negation ? "not (" + result + ")" : result;
+}
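+
+// For example (illustrative): for a matcher defined with
+// MATCHER_P(IsDivisibleBy, n, ""), calling this function with
+// negation == false, matcher_name == "IsDivisibleBy", and a single
+// parameter printout "7" produces "is divisible by 7"; with
+// negation == true it produces "not (is divisible by 7)".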
+
+// FindMaxBipartiteMatching and its helper class.
+//
+// Uses the well-known Ford-Fulkerson max flow method to find a maximum
+// bipartite matching. Flow is considered to be from left to right.
+// There is an implicit source node that is connected to all of the left
+// nodes, and an implicit sink node that is connected to all of the
+// right nodes. All edges have unit capacity.
+//
+// Neither the flow graph nor the residual flow graph are represented
+// explicitly. Instead, they are implied by the information in 'graph' and
+// a vector<int> called 'left_' whose elements are initialized to the
+// value kUnused. This represents the initial state of the algorithm,
+// where the flow graph is empty, and the residual flow graph has the
+// following edges:
+//   - An edge from source to each left_ node
+//   - An edge from each right_ node to sink
+//   - An edge from each left_ node to each right_ node, if the
+//     corresponding edge exists in 'graph'.
+//
+// When the TryAugment() method adds a flow, it sets left_[l] = r for some
+// nodes l and r. This induces the following changes:
+//   - The edges (source, l), (l, r), and (r, sink) are added to the
+//     flow graph.
+//   - The same three edges are removed from the residual flow graph.
+//   - The reverse edges (l, source), (r, l), and (sink, r) are added
+//     to the residual flow graph, which is a directional graph
+//     representing unused flow capacity.
+//
+// When the method augments a flow (moving left_[l] from some r1 to some
+// other r2), this can be thought of as "undoing" the above steps with
+// respect to r1 and "redoing" them with respect to r2.
+//
+// It bears repeating that the flow graph and residual flow graph are
+// never represented explicitly, but can be derived by looking at the
+// information in 'graph' and in left_.
+//
+// As an optimization, there is a second vector<int> called right_ which
+// does not provide any new information. Instead, it enables more
+// efficient queries about edges entering or leaving the right-side nodes
+// of the flow or residual flow graphs. The following invariants are
+// maintained:
+//
+// left[l] == kUnused or right[left[l]] == l
+// right[r] == kUnused or left[right[r]] == r
+//
+// . [ source ]                                        .
+// .   |||                                             .
+// .   |||                                             .
+// .   ||\--> left[0]=1  ---\    right[0]=-1 ----\     .
+// .   ||                   |                    |     .
+// .   |\---> left[1]=-1    \--> right[1]=0  ---\|     .
+// .   |                                        ||     .
+// .   \----> left[2]=2  ------> right[2]=2  --\||     .
+// .                                           |||     .
+// .         elements           matchers       vvv     .
+// .                                         [ sink ]  .
+//
+// See Also:
+//   [1] Cormen, et al (2001). "Section 26.2: The Ford-Fulkerson method".
+//       "Introduction to Algorithms (Second ed.)", pp. 651-664.
+//   [2] "Ford-Fulkerson algorithm", Wikipedia,
+//       'http://en.wikipedia.org/wiki/Ford%E2%80%93Fulkerson_algorithm'
+class MaxBipartiteMatchState {
+ public:
+  explicit MaxBipartiteMatchState(const MatchMatrix& graph)
+      : graph_(&graph),
+        left_(graph_->LhsSize(), kUnused),
+        right_(graph_->RhsSize(), kUnused) {
+  }
+
+  // Returns the edges of a maximal match, each in the form {left, right}.
+  ElementMatcherPairs Compute() {
+    // 'seen' is used for path finding { 0: unseen, 1: seen }.
+    ::std::vector<char> seen;
+    // Searches the residual flow graph for a path from each left node
+    // to the sink; if one is found, adds flow to the graph. It's okay
+    // to search through the left nodes once. The
+    // edge from the implicit source node to each previously-visited left
+    // node will have flow if that left node has any path to the sink
+    // whatsoever. Subsequent augmentations can only add flow to the
+    // network, and cannot take away that previous flow unit from the source.
+    // Since the source-to-left edge can only carry one flow unit (or,
+    // each element can be matched to only one matcher), there is no need
+    // to visit the left nodes more than once looking for augmented paths.
+    // The flow is known to be possible or impossible by looking at the
+    // node once.
+    for (size_t ilhs = 0; ilhs < graph_->LhsSize(); ++ilhs) {
+      // Reset the path-marking vector and try to find a path from
+      // source to sink starting at the left_[ilhs] node.
+      GTEST_CHECK_(left_[ilhs] == kUnused)
+          << "ilhs: " << ilhs << ", left_[ilhs]: " << left_[ilhs];
+      // 'seen' initialized to 'graph_->RhsSize()' copies of 0.
+      seen.assign(graph_->RhsSize(), 0);
+      TryAugment(ilhs, &seen);
+    }
+    ElementMatcherPairs result;
+    for (size_t ilhs = 0; ilhs < left_.size(); ++ilhs) {
+      size_t irhs = left_[ilhs];
+      if (irhs == kUnused) continue;
+      result.push_back(ElementMatcherPair(ilhs, irhs));
+    }
+    return result;
+  }
+
+ private:
+  static const size_t kUnused = static_cast<size_t>(-1);
+
+  // Perform a depth-first search from left node ilhs to the sink.  If a
+  // path is found, flow is added to the network by linking the left and
+  // right vector elements corresponding to each segment of the path.
+  // Returns true if a path to sink was found, which means that a unit of
+  // flow was added to the network. The 'seen' vector elements correspond
+  // to right nodes and are marked to eliminate cycles from the search.
+  //
+  // Left nodes will be explored at most once because they
+  // are accessible from at most one right node in the residual flow
+  // graph.
+  //
+  // Note that left_[ilhs] is the only element of left_ that TryAugment will
+  // potentially transition from kUnused to another value. Any other
+  // left_ element holding kUnused before TryAugment will be holding it
+  // when TryAugment returns.
+  //
+  bool TryAugment(size_t ilhs, ::std::vector<char>* seen) {
+    for (size_t irhs = 0; irhs < graph_->RhsSize(); ++irhs) {
+      if ((*seen)[irhs])
+        continue;
+      if (!graph_->HasEdge(ilhs, irhs))
+        continue;
+      // There's an available edge from ilhs to irhs.
+      (*seen)[irhs] = 1;
+      // Next a search is performed to determine whether
+      // this edge is a dead end or leads to the sink.
+      //
+      // right_[irhs] == kUnused means that there is residual flow from
+      // right node irhs to the sink, so we can use that to finish this
+      // flow path and return success.
+      //
+      // Otherwise there is residual flow to some ilhs. We push flow
+      // along that path and call ourselves recursively to see if this
+      // ultimately leads to sink.
+      if (right_[irhs] == kUnused || TryAugment(right_[irhs], seen)) {
+        // Add flow from left_[ilhs] to right_[irhs].
+        left_[ilhs] = irhs;
+        right_[irhs] = ilhs;
+        return true;
+      }
+    }
+    return false;
+  }
+
+  const MatchMatrix* graph_;  // not owned
+  // Each element of the left_ vector represents a left hand side node
+  // (i.e. an element) and each element of right_ is a right hand side
+  // node (i.e. a matcher). The values in the left_ vector indicate
+  // outflow from that node to a node on the right_ side. The values
+  // in the right_ vector indicate inflow, and specify which left_ node is
+  // feeding that right_ node, if any. For example, left_[3] == 1 means
+  // there's a flow from element #3 to matcher #1. Such a flow would also
+  // be redundantly represented in the right_ vector as right_[1] == 3.
+  // Elements of left_ and right_ are either kUnused or mutually
+  // referent. Mutually referent means that left_[right_[i]] = i and
+  // right_[left_[i]] = i.
+  ::std::vector<size_t> left_;
+  ::std::vector<size_t> right_;
+
+  GTEST_DISALLOW_ASSIGN_(MaxBipartiteMatchState);
+};
+
+const size_t MaxBipartiteMatchState::kUnused;
+
+GTEST_API_ ElementMatcherPairs
+FindMaxBipartiteMatching(const MatchMatrix& g) {
+  return MaxBipartiteMatchState(g).Compute();
+}
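+
+// A small worked example (illustrative): suppose element #0 matches
+// matchers #0 and #1, while element #1 matches only matcher #0.
+// Compute() first pairs element #0 with matcher #0; when it processes
+// element #1, matcher #0 is already taken, so TryAugment() recursively
+// re-routes element #0 to matcher #1, freeing matcher #0 for element #1.
+// The resulting maximum matching is {(0, 1), (1, 0)}, which covers both
+// matchers.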
+
+static void LogElementMatcherPairVec(const ElementMatcherPairs& pairs,
+                                     ::std::ostream* stream) {
+  typedef ElementMatcherPairs::const_iterator Iter;
+  ::std::ostream& os = *stream;
+  os << "{";
+  const char *sep = "";
+  for (Iter it = pairs.begin(); it != pairs.end(); ++it) {
+    os << sep << "\n  ("
+       << "element #" << it->first << ", "
+       << "matcher #" << it->second << ")";
+    sep = ",";
+  }
+  os << "\n}";
+}
+
+// Tries to find a pairing, and explains the result.
+GTEST_API_ bool FindPairing(const MatchMatrix& matrix,
+                            MatchResultListener* listener) {
+  ElementMatcherPairs matches = FindMaxBipartiteMatching(matrix);
+
+  size_t max_flow = matches.size();
+  bool result = (max_flow == matrix.RhsSize());
+
+  if (!result) {
+    if (listener->IsInterested()) {
+      *listener << "where no permutation of the elements can "
+                   "satisfy all matchers, and the closest match is "
+                << max_flow << " of " << matrix.RhsSize()
+                << " matchers with the pairings:\n";
+      LogElementMatcherPairVec(matches, listener->stream());
+    }
+    return false;
+  }
+
+  if (matches.size() > 1) {
+    if (listener->IsInterested()) {
+      const char *sep = "where:\n";
+      for (size_t mi = 0; mi < matches.size(); ++mi) {
+        *listener << sep << " - element #" << matches[mi].first
+                  << " is matched by matcher #" << matches[mi].second;
+        sep = ",\n";
+      }
+    }
+  }
+  return true;
+}
+
+bool MatchMatrix::NextGraph() {
+  for (size_t ilhs = 0; ilhs < LhsSize(); ++ilhs) {
+    for (size_t irhs = 0; irhs < RhsSize(); ++irhs) {
+      char& b = matched_[SpaceIndex(ilhs, irhs)];
+      if (!b) {
+        b = 1;
+        return true;
+      }
+      b = 0;
+    }
+  }
+  return false;
+}
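+
+// NextGraph() treats matched_ as a binary counter whose least-significant
+// bit is the (0, 0) edge. Starting from the empty graph on a 1x2 matrix,
+// successive calls produce the edge sets {(0,0)}, {(0,1)},
+// {(0,0), (0,1)}, and then return false after resetting to the empty
+// graph (illustrative of the enumeration order only).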
+
+void MatchMatrix::Randomize() {
+  for (size_t ilhs = 0; ilhs < LhsSize(); ++ilhs) {
+    for (size_t irhs = 0; irhs < RhsSize(); ++irhs) {
+      char& b = matched_[SpaceIndex(ilhs, irhs)];
+      b = static_cast<char>(rand() & 1);  // NOLINT
+    }
+  }
+}
+
+string MatchMatrix::DebugString() const {
+  ::std::stringstream ss;
+  const char *sep = "";
+  for (size_t i = 0; i < LhsSize(); ++i) {
+    ss << sep;
+    for (size_t j = 0; j < RhsSize(); ++j) {
+      ss << HasEdge(i, j);
+    }
+    sep = ";";
+  }
+  return ss.str();
+}
+
+void UnorderedElementsAreMatcherImplBase::DescribeToImpl(
+    ::std::ostream* os) const {
+  if (matcher_describers_.empty()) {
+    *os << "is empty";
+    return;
+  }
+  if (matcher_describers_.size() == 1) {
+    *os << "has " << Elements(1) << " and that element ";
+    matcher_describers_[0]->DescribeTo(os);
+    return;
+  }
+  *os << "has " << Elements(matcher_describers_.size())
+      << " and there exists some permutation of elements such that:\n";
+  const char* sep = "";
+  for (size_t i = 0; i != matcher_describers_.size(); ++i) {
+    *os << sep << " - element #" << i << " ";
+    matcher_describers_[i]->DescribeTo(os);
+    sep = ", and\n";
+  }
+}
+
+void UnorderedElementsAreMatcherImplBase::DescribeNegationToImpl(
+    ::std::ostream* os) const {
+  if (matcher_describers_.empty()) {
+    *os << "isn't empty";
+    return;
+  }
+  if (matcher_describers_.size() == 1) {
+    *os << "doesn't have " << Elements(1)
+        << ", or has " << Elements(1) << " that ";
+    matcher_describers_[0]->DescribeNegationTo(os);
+    return;
+  }
+  *os << "doesn't have " << Elements(matcher_describers_.size())
+      << ", or there exists no permutation of elements such that:\n";
+  const char* sep = "";
+  for (size_t i = 0; i != matcher_describers_.size(); ++i) {
+    *os << sep << " - element #" << i << " ";
+    matcher_describers_[i]->DescribeTo(os);
+    sep = ", and\n";
+  }
+}
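+
+// For instance (illustrative; the per-element wording comes from the
+// underlying matchers' own describers), UnorderedElementsAre(1, 2)
+// describes itself roughly as:
+//
+//   has 2 elements and there exists some permutation of elements such
+//   that:
+//    - element #0 is equal to 1, and
+//    - element #1 is equal to 2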
+
+// Checks that all matchers match at least one element, and that all
+// elements match at least one matcher. This enables faster matching
+// and better error reporting.
+// Returns false, writing an explanation to 'listener', if and only
+// if the success criteria are not met.
+bool UnorderedElementsAreMatcherImplBase::
+VerifyAllElementsAndMatchersAreMatched(
+    const ::std::vector<string>& element_printouts,
+    const MatchMatrix& matrix,
+    MatchResultListener* listener) const {
+  bool result = true;
+  ::std::vector<char> element_matched(matrix.LhsSize(), 0);
+  ::std::vector<char> matcher_matched(matrix.RhsSize(), 0);
+
+  for (size_t ilhs = 0; ilhs < matrix.LhsSize(); ilhs++) {
+    for (size_t irhs = 0; irhs < matrix.RhsSize(); irhs++) {
+      char matched = matrix.HasEdge(ilhs, irhs);
+      element_matched[ilhs] |= matched;
+      matcher_matched[irhs] |= matched;
+    }
+  }
+
+  {
+    const char* sep =
+        "where the following matchers don't match any elements:\n";
+    for (size_t mi = 0; mi < matcher_matched.size(); ++mi) {
+      if (matcher_matched[mi])
+        continue;
+      result = false;
+      if (listener->IsInterested()) {
+        *listener << sep << "matcher #" << mi << ": ";
+        matcher_describers_[mi]->DescribeTo(listener->stream());
+        sep = ",\n";
+      }
+    }
+  }
+
+  {
+    const char* sep =
+        "where the following elements don't match any matchers:\n";
+    const char* outer_sep = "";
+    if (!result) {
+      outer_sep = "\nand ";
+    }
+    for (size_t ei = 0; ei < element_matched.size(); ++ei) {
+      if (element_matched[ei])
+        continue;
+      result = false;
+      if (listener->IsInterested()) {
+        *listener << outer_sep << sep << "element #" << ei << ": "
+                  << element_printouts[ei];
+        sep = ",\n";
+        outer_sep = "";
+      }
+    }
+  }
+  return result;
+}
+
+}  // namespace internal
+}  // namespace testing
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Mock - a framework for writing C++ mock classes.
+//
+// This file implements the spec builder syntax (ON_CALL and
+// EXPECT_CALL).
+
+
+#include <stdlib.h>
+#include <iostream>  // NOLINT
+#include <map>
+#include <set>
+#include <string>
+
+#if GTEST_OS_CYGWIN || GTEST_OS_LINUX || GTEST_OS_MAC
+# include <unistd.h>  // NOLINT
+#endif
+
+namespace testing {
+namespace internal {
+
+// Protects the mock object registry (in class Mock), all function
+// mockers, and all expectations.
+GTEST_API_ GTEST_DEFINE_STATIC_MUTEX_(g_gmock_mutex);
+
+// Logs a message including file and line number information.
+GTEST_API_ void LogWithLocation(testing::internal::LogSeverity severity,
+                                const char* file, int line,
+                                const string& message) {
+  ::std::ostringstream s;
+  s << file << ":" << line << ": " << message << ::std::endl;
+  Log(severity, s.str(), 0);
+}
+
+// Constructs an ExpectationBase object.
+ExpectationBase::ExpectationBase(const char* a_file,
+                                 int a_line,
+                                 const string& a_source_text)
+    : file_(a_file),
+      line_(a_line),
+      source_text_(a_source_text),
+      cardinality_specified_(false),
+      cardinality_(Exactly(1)),
+      call_count_(0),
+      retired_(false),
+      extra_matcher_specified_(false),
+      repeated_action_specified_(false),
+      retires_on_saturation_(false),
+      last_clause_(kNone),
+      action_count_checked_(false) {}
+
+// Destructs an ExpectationBase object.
+ExpectationBase::~ExpectationBase() {}
+
+// Explicitly specifies the cardinality of this expectation.  Used by
+// the subclasses to implement the .Times() clause.
+void ExpectationBase::SpecifyCardinality(const Cardinality& a_cardinality) {
+  cardinality_specified_ = true;
+  cardinality_ = a_cardinality;
+}
+
+// Retires all pre-requisites of this expectation.
+void ExpectationBase::RetireAllPreRequisites()
+    GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) {
+  if (is_retired()) {
+    // We can take this short-cut as we never retire an expectation
+    // until we have retired all its pre-requisites.
+    return;
+  }
+
+  for (ExpectationSet::const_iterator it = immediate_prerequisites_.begin();
+       it != immediate_prerequisites_.end(); ++it) {
+    ExpectationBase* const prerequisite = it->expectation_base().get();
+    if (!prerequisite->is_retired()) {
+      prerequisite->RetireAllPreRequisites();
+      prerequisite->Retire();
+    }
+  }
+}
+
+// Returns true iff all pre-requisites of this expectation have been
+// satisfied.
+bool ExpectationBase::AllPrerequisitesAreSatisfied() const
+    GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) {
+  g_gmock_mutex.AssertHeld();
+  for (ExpectationSet::const_iterator it = immediate_prerequisites_.begin();
+       it != immediate_prerequisites_.end(); ++it) {
+    if (!(it->expectation_base()->IsSatisfied()) ||
+        !(it->expectation_base()->AllPrerequisitesAreSatisfied()))
+      return false;
+  }
+  return true;
+}
+
+// Adds unsatisfied pre-requisites of this expectation to 'result'.
+void ExpectationBase::FindUnsatisfiedPrerequisites(ExpectationSet* result) const
+    GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) {
+  g_gmock_mutex.AssertHeld();
+  for (ExpectationSet::const_iterator it = immediate_prerequisites_.begin();
+       it != immediate_prerequisites_.end(); ++it) {
+    if (it->expectation_base()->IsSatisfied()) {
+      // If *it is satisfied and has a call count of 0, some of its
+      // pre-requisites may not be satisfied yet.
+      if (it->expectation_base()->call_count_ == 0) {
+        it->expectation_base()->FindUnsatisfiedPrerequisites(result);
+      }
+    } else {
+      // Now that we know *it is unsatisfied, we are not so interested
+      // in whether its pre-requisites are satisfied.  Therefore we
+      // don't recursively call FindUnsatisfiedPrerequisites() here.
+      *result += *it;
+    }
+  }
+}
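+
+// Prerequisites typically come from InSequence or .After() in user code
+// (illustrative):
+//
+//   {
+//     InSequence s;
+//     EXPECT_CALL(mock, Open());
+//     EXPECT_CALL(mock, Close());
+//   }
+//
+// Here the Open() expectation is an immediate prerequisite of the Close()
+// expectation, so a Close() call is rejected until Open() has been
+// satisfied, which is exactly the condition AllPrerequisitesAreSatisfied()
+// checks.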
+
+// Describes how many times a function call matching this
+// expectation has occurred.
+void ExpectationBase::DescribeCallCountTo(::std::ostream* os) const
+    GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) {
+  g_gmock_mutex.AssertHeld();
+
+  // Describes how many times the function is expected to be called.
+  *os << "         Expected: to be ";
+  cardinality().DescribeTo(os);
+  *os << "\n           Actual: ";
+  Cardinality::DescribeActualCallCountTo(call_count(), os);
+
+  // Describes the state of the expectation (e.g. is it satisfied?
+  // is it active?).
+  *os << " - " << (IsOverSaturated() ? "over-saturated" :
+                   IsSaturated() ? "saturated" :
+                   IsSatisfied() ? "satisfied" : "unsatisfied")
+      << " and "
+      << (is_retired() ? "retired" : "active");
+}
+
+// Checks the action count (i.e. the number of WillOnce() and
+// WillRepeatedly() clauses) against the cardinality if this hasn't
+// been done before.  Prints a warning if there are too many or too
+// few actions.
+void ExpectationBase::CheckActionCountIfNotDone() const
+    GTEST_LOCK_EXCLUDED_(mutex_) {
+  bool should_check = false;
+  {
+    MutexLock l(&mutex_);
+    if (!action_count_checked_) {
+      action_count_checked_ = true;
+      should_check = true;
+    }
+  }
+
+  if (should_check) {
+    if (!cardinality_specified_) {
+      // The cardinality was inferred - no need to check the action
+      // count against it.
+      return;
+    }
+
+    // The cardinality was explicitly specified.
+    const int action_count = static_cast<int>(untyped_actions_.size());
+    const int upper_bound = cardinality().ConservativeUpperBound();
+    const int lower_bound = cardinality().ConservativeLowerBound();
+    bool too_many;  // True if there are too many actions, or false
+    // if there are too few.
+    if (action_count > upper_bound ||
+        (action_count == upper_bound && repeated_action_specified_)) {
+      too_many = true;
+    } else if (0 < action_count && action_count < lower_bound &&
+               !repeated_action_specified_) {
+      too_many = false;
+    } else {
+      return;
+    }
+
+    ::std::stringstream ss;
+    DescribeLocationTo(&ss);
+    ss << "Too " << (too_many ? "many" : "few")
+       << " actions specified in " << source_text() << "...\n"
+       << "Expected to be ";
+    cardinality().DescribeTo(&ss);
+    ss << ", but has " << (too_many ? "" : "only ")
+       << action_count << " WillOnce()"
+       << (action_count == 1 ? "" : "s");
+    if (repeated_action_specified_) {
+      ss << " and a WillRepeatedly()";
+    }
+    ss << ".";
+    Log(kWarning, ss.str(), -1);  // -1 means "don't print stack trace".
+  }
+}
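+
+// For example (illustrative), the following spec triggers the "too many
+// actions" warning, since two WillOnce() clauses can never both run when
+// the call count is capped at 1:
+//
+//   EXPECT_CALL(mock, Foo())
+//       .Times(1)
+//       .WillOnce(Return(1))
+//       .WillOnce(Return(2));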
+
+// Implements the .Times() clause.
+void ExpectationBase::UntypedTimes(const Cardinality& a_cardinality) {
+  if (last_clause_ == kTimes) {
+    ExpectSpecProperty(false,
+                       ".Times() cannot appear "
+                       "more than once in an EXPECT_CALL().");
+  } else {
+    ExpectSpecProperty(last_clause_ < kTimes,
+                       ".Times() cannot appear after "
+                       ".InSequence(), .WillOnce(), .WillRepeatedly(), "
+                       "or .RetiresOnSaturation().");
+  }
+  last_clause_ = kTimes;
+
+  SpecifyCardinality(a_cardinality);
+}
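+
+// Consequently a well-formed spec lists .Times() at most once and before
+// any .InSequence(), .WillOnce(), .WillRepeatedly(), or
+// .RetiresOnSaturation() clause, e.g. (illustrative):
+//
+//   EXPECT_CALL(mock, Foo()).Times(2).WillRepeatedly(Return(0));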
+
+// Points to the implicit sequence introduced by a living InSequence
+// object (if any) in the current thread or NULL.
+GTEST_API_ ThreadLocal<Sequence*> g_gmock_implicit_sequence;
+
+// Reports an uninteresting call (whose description is in msg) in the
+// manner specified by 'reaction'.
+void ReportUninterestingCall(CallReaction reaction, const string& msg) {
+  // Include a stack trace only if --gmock_verbose=info is specified.
+  const int stack_frames_to_skip =
+      GMOCK_FLAG(verbose) == kInfoVerbosity ? 3 : -1;
+  switch (reaction) {
+    case kAllow:
+      Log(kInfo, msg, stack_frames_to_skip);
+      break;
+    case kWarn:
+      Log(kWarning,
+          msg +
+          "\nNOTE: You can safely ignore the above warning unless this "
+          "call should not happen.  Do not suppress it by blindly adding "
+          "an EXPECT_CALL() if you don't mean to enforce the call.  "
+          "See https://github.com/google/googletest/blob/master/googlemock/docs/CookBook.md#"
+          "knowing-when-to-expect for details.\n",
+          stack_frames_to_skip);
+      break;
+    default:  // FAIL
+      Expect(false, NULL, -1, msg);
+  }
+}
+
+UntypedFunctionMockerBase::UntypedFunctionMockerBase()
+    : mock_obj_(NULL), name_("") {}
+
+UntypedFunctionMockerBase::~UntypedFunctionMockerBase() {}
+
+// Sets the mock object this mock method belongs to, and registers
+// this information in the global mock registry.  Will be called
+// whenever an EXPECT_CALL() or ON_CALL() is executed on this mock
+// method.
+void UntypedFunctionMockerBase::RegisterOwner(const void* mock_obj)
+    GTEST_LOCK_EXCLUDED_(g_gmock_mutex) {
+  {
+    MutexLock l(&g_gmock_mutex);
+    mock_obj_ = mock_obj;
+  }
+  Mock::Register(mock_obj, this);
+}
+
+// Sets the mock object this mock method belongs to, and sets the name
+// of the mock function.  Will be called upon each invocation of this
+// mock function.
+void UntypedFunctionMockerBase::SetOwnerAndName(const void* mock_obj,
+                                                const char* name)
+    GTEST_LOCK_EXCLUDED_(g_gmock_mutex) {
+  // We protect name_ under g_gmock_mutex in case this mock function
+  // is called from two threads concurrently.
+  MutexLock l(&g_gmock_mutex);
+  mock_obj_ = mock_obj;
+  name_ = name;
+}
+
+// Returns the name of the function being mocked.  Must be called
+// after RegisterOwner() or SetOwnerAndName() has been called.
+const void* UntypedFunctionMockerBase::MockObject() const
+    GTEST_LOCK_EXCLUDED_(g_gmock_mutex) {
+  const void* mock_obj;
+  {
+    // We protect mock_obj_ under g_gmock_mutex in case this mock
+    // function is called from two threads concurrently.
+    MutexLock l(&g_gmock_mutex);
+    Assert(mock_obj_ != NULL, __FILE__, __LINE__,
+           "MockObject() must not be called before RegisterOwner() or "
+           "SetOwnerAndName() has been called.");
+    mock_obj = mock_obj_;
+  }
+  return mock_obj;
+}
+
+// Returns the name of this mock method.  Must be called after
+// SetOwnerAndName() has been called.
+const char* UntypedFunctionMockerBase::Name() const
+    GTEST_LOCK_EXCLUDED_(g_gmock_mutex) {
+  const char* name;
+  {
+    // We protect name_ under g_gmock_mutex in case this mock
+    // function is called from two threads concurrently.
+    MutexLock l(&g_gmock_mutex);
+    Assert(name_ != NULL, __FILE__, __LINE__,
+           "Name() must not be called before SetOwnerAndName() has "
+           "been called.");
+    name = name_;
+  }
+  return name;
+}
+
+// Calculates the result of invoking this mock function with the given
+// arguments, prints it, and returns it.  The caller is responsible
+// for deleting the result.
+UntypedActionResultHolderBase*
+UntypedFunctionMockerBase::UntypedInvokeWith(const void* const untyped_args)
+    GTEST_LOCK_EXCLUDED_(g_gmock_mutex) {
+  if (untyped_expectations_.size() == 0) {
+    // No expectation is set on this mock method - we have an
+    // uninteresting call.
+
+    // We must get Google Mock's reaction on uninteresting calls
+    // made on this mock object BEFORE performing the action,
+    // because the action may DELETE the mock object and make the
+    // following expression meaningless.
+    const CallReaction reaction =
+        Mock::GetReactionOnUninterestingCalls(MockObject());
+
+    // True iff we need to print this call's arguments and return
+    // value.  This definition must be kept in sync with
+    // the behavior of ReportUninterestingCall().
+    const bool need_to_report_uninteresting_call =
+        // If the user allows this uninteresting call, we print it
+        // only when they want informational messages.
+        reaction == kAllow ? LogIsVisible(kInfo) :
+        // If the user wants this to be a warning, we print it only
+        // when they want to see warnings.
+        reaction == kWarn ? LogIsVisible(kWarning) :
+        // Otherwise, the user wants this to be an error, and we
+        // should always print detailed information in the error.
+        true;
+
+    if (!need_to_report_uninteresting_call) {
+      // Perform the action without printing the call information.
+      return this->UntypedPerformDefaultAction(untyped_args, "");
+    }
+
+    // Warns about the uninteresting call.
+    ::std::stringstream ss;
+    this->UntypedDescribeUninterestingCall(untyped_args, &ss);
+
+    // Calculates the function result.
+    UntypedActionResultHolderBase* const result =
+        this->UntypedPerformDefaultAction(untyped_args, ss.str());
+
+    // Prints the function result.
+    if (result != NULL)
+      result->PrintAsActionResult(&ss);
+
+    ReportUninterestingCall(reaction, ss.str());
+    return result;
+  }
+
+  bool is_excessive = false;
+  ::std::stringstream ss;
+  ::std::stringstream why;
+  ::std::stringstream loc;
+  const void* untyped_action = NULL;
+
+  // The UntypedFindMatchingExpectation() function acquires and
+  // releases g_gmock_mutex.
+  const ExpectationBase* const untyped_expectation =
+      this->UntypedFindMatchingExpectation(
+          untyped_args, &untyped_action, &is_excessive,
+          &ss, &why);
+  const bool found = untyped_expectation != NULL;
+
+  // True iff we need to print the call's arguments and return value.
+  // This definition must be kept in sync with the uses of Expect()
+  // and Log() in this function.
+  const bool need_to_report_call =
+      !found || is_excessive || LogIsVisible(kInfo);
+  if (!need_to_report_call) {
+    // Perform the action without printing the call information.
+    return
+        untyped_action == NULL ?
+        this->UntypedPerformDefaultAction(untyped_args, "") :
+        this->UntypedPerformAction(untyped_action, untyped_args);
+  }
+
+  ss << "    Function call: " << Name();
+  this->UntypedPrintArgs(untyped_args, &ss);
+
+  // In case the action deletes a piece of the expectation, we
+  // generate the message beforehand.
+  if (found && !is_excessive) {
+    untyped_expectation->DescribeLocationTo(&loc);
+  }
+
+  UntypedActionResultHolderBase* const result =
+      untyped_action == NULL ?
+      this->UntypedPerformDefaultAction(untyped_args, ss.str()) :
+      this->UntypedPerformAction(untyped_action, untyped_args);
+  if (result != NULL)
+    result->PrintAsActionResult(&ss);
+  ss << "\n" << why.str();
+
+  if (!found) {
+    // No expectation matches this call - reports a failure.
+    Expect(false, NULL, -1, ss.str());
+  } else if (is_excessive) {
+    // We had an upper-bound violation and the failure message is in ss.
+    Expect(false, untyped_expectation->file(),
+           untyped_expectation->line(), ss.str());
+  } else {
+    // We had an expected call and the matching expectation is
+    // described in ss.
+    Log(kInfo, loc.str() + ss.str(), 2);
+  }
+
+  return result;
+}
+
+// Returns an Expectation object that references and co-owns exp,
+// which must be an expectation on this mock function.
+Expectation UntypedFunctionMockerBase::GetHandleOf(ExpectationBase* exp) {
+  for (UntypedExpectations::const_iterator it =
+           untyped_expectations_.begin();
+       it != untyped_expectations_.end(); ++it) {
+    if (it->get() == exp) {
+      return Expectation(*it);
+    }
+  }
+
+  Assert(false, __FILE__, __LINE__, "Cannot find expectation.");
+  return Expectation();
+  // The above statement is just to make the code compile, and will
+  // never be executed.
+}
+
+// Verifies that all expectations on this mock function have been
+// satisfied.  Reports one or more Google Test non-fatal failures
+// and returns false if not.
+bool UntypedFunctionMockerBase::VerifyAndClearExpectationsLocked()
+    GTEST_EXCLUSIVE_LOCK_REQUIRED_(g_gmock_mutex) {
+  g_gmock_mutex.AssertHeld();
+  bool expectations_met = true;
+  for (UntypedExpectations::const_iterator it =
+           untyped_expectations_.begin();
+       it != untyped_expectations_.end(); ++it) {
+    ExpectationBase* const untyped_expectation = it->get();
+    if (untyped_expectation->IsOverSaturated()) {
+      // There was an upper-bound violation.  Since the error was
+      // already reported when it occurred, there is no need to do
+      // anything here.
+      expectations_met = false;
+    } else if (!untyped_expectation->IsSatisfied()) {
+      expectations_met = false;
+      ::std::stringstream ss;
+      ss  << "Actual function call count doesn't match "
+          << untyped_expectation->source_text() << "...\n";
+      // No need to show the source file location of the expectation
+      // in the description, as the Expect() call that follows already
+      // takes care of it.
+      untyped_expectation->MaybeDescribeExtraMatcherTo(&ss);
+      untyped_expectation->DescribeCallCountTo(&ss);
+      Expect(false, untyped_expectation->file(),
+             untyped_expectation->line(), ss.str());
+    }
+  }
+
+  // Deleting our expectations may trigger other mock objects to be deleted, for
+  // example if an action contains a reference counted smart pointer to that
+  // mock object, and that is the last reference. So if we delete our
+  // expectations within the context of the global mutex we may deadlock when
+  // this method is called again. Instead, make a copy of the set of
+  // expectations to delete, clear our set within the mutex, and then clear the
+  // copied set outside of it.
+  UntypedExpectations expectations_to_delete;
+  untyped_expectations_.swap(expectations_to_delete);
+
+  g_gmock_mutex.Unlock();
+  expectations_to_delete.clear();
+  g_gmock_mutex.Lock();
+
+  return expectations_met;
+}
+
+}  // namespace internal
+
+// Class Mock.
+
+namespace {
+
+typedef std::set<internal::UntypedFunctionMockerBase*> FunctionMockers;
+
+// The current state of a mock object.  Such information is needed for
+// detecting leaked mock objects and explicitly verifying a mock's
+// expectations.
+struct MockObjectState {
+  MockObjectState()
+      : first_used_file(NULL), first_used_line(-1), leakable(false) {}
+
+  // Where in the source file an ON_CALL or EXPECT_CALL is first
+  // invoked on this mock object.
+  const char* first_used_file;
+  int first_used_line;
+  ::std::string first_used_test_case;
+  ::std::string first_used_test;
+  bool leakable;  // true iff it's OK to leak the object.
+  FunctionMockers function_mockers;  // All registered methods of the object.
+};
+
+// A global registry holding the state of all mock objects that are
+// alive.  A mock object is added to this registry the first time
+// Mock::AllowLeak(), ON_CALL(), or EXPECT_CALL() is called on it.  It
+// is removed from the registry in the mock object's destructor.
+class MockObjectRegistry {
+ public:
+  // Maps a mock object (identified by its address) to its state.
+  typedef std::map<const void*, MockObjectState> StateMap;
+
+  // This destructor will be called when a program exits, after all
+  // tests in it have been run.  By then, there should be no mock
+  // object alive.  Therefore we report any living object as a test
+  // failure, unless the user explicitly asked us to ignore it.
+  ~MockObjectRegistry() {
+    // "using ::std::cout;" doesn't work with Symbian's STLport, where cout is
+    // a macro.
+
+    if (!GMOCK_FLAG(catch_leaked_mocks))
+      return;
+
+    int leaked_count = 0;
+    for (StateMap::const_iterator it = states_.begin(); it != states_.end();
+         ++it) {
+      if (it->second.leakable)  // The user said it's fine to leak this object.
+        continue;
+
+      // TODO(wan@google.com): Print the type of the leaked object.
+      // This can help the user identify the leaked object.
+      std::cout << "\n";
+      const MockObjectState& state = it->second;
+      std::cout << internal::FormatFileLocation(state.first_used_file,
+                                                state.first_used_line);
+      std::cout << " ERROR: this mock object";
+      if (state.first_used_test != "") {
+        std::cout << " (used in test " << state.first_used_test_case << "."
+             << state.first_used_test << ")";
+      }
+      std::cout << " should be deleted but never is. Its address is @"
+           << it->first << ".";
+      leaked_count++;
+    }
+    if (leaked_count > 0) {
+      std::cout << "\nERROR: " << leaked_count
+           << " leaked mock " << (leaked_count == 1 ? "object" : "objects")
+           << " found at program exit.\n";
+      std::cout.flush();
+      ::std::cerr.flush();
+      // RUN_ALL_TESTS() has already returned when this destructor is
+      // called.  Therefore we cannot use the normal Google Test
+      // failure reporting mechanism.
+      _exit(1);  // We cannot call exit() as it is not reentrant and
+                 // may already have been called.
+    }
+  }
+
+  StateMap& states() { return states_; }
+
+ private:
+  StateMap states_;
+};
+
+// Protected by g_gmock_mutex.
+MockObjectRegistry g_mock_object_registry;
+
+// Maps a mock object to the reaction Google Mock should have when an
+// uninteresting method is called.  Protected by g_gmock_mutex.
+std::map<const void*, internal::CallReaction> g_uninteresting_call_reaction;
+
+// Sets the reaction Google Mock should have when an uninteresting
+// method of the given mock object is called.
+void SetReactionOnUninterestingCalls(const void* mock_obj,
+                                     internal::CallReaction reaction)
+    GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex) {
+  internal::MutexLock l(&internal::g_gmock_mutex);
+  g_uninteresting_call_reaction[mock_obj] = reaction;
+}
+
+}  // namespace
+
+// Tells Google Mock to allow uninteresting calls on the given mock
+// object.
+void Mock::AllowUninterestingCalls(const void* mock_obj)
+    GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex) {
+  SetReactionOnUninterestingCalls(mock_obj, internal::kAllow);
+}
+
+// Tells Google Mock to warn the user about uninteresting calls on the
+// given mock object.
+void Mock::WarnUninterestingCalls(const void* mock_obj)
+    GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex) {
+  SetReactionOnUninterestingCalls(mock_obj, internal::kWarn);
+}
+
+// Tells Google Mock to fail uninteresting calls on the given mock
+// object.
+void Mock::FailUninterestingCalls(const void* mock_obj)
+    GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex) {
+  SetReactionOnUninterestingCalls(mock_obj, internal::kFail);
+}
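The three setters above are what the NiceMock, NaggyMock, and StrictMock wrappers call from their constructors to register a per-object reaction. A minimal sketch of the resulting behavior; Turtle and MockTurtle are hypothetical types defined only for illustration:

#include "gmock/gmock.h"
#include "gtest/gtest.h"

class Turtle {
 public:
  virtual ~Turtle() {}
  virtual void PenDown() = 0;
  virtual void PenUp() = 0;
};

class MockTurtle : public Turtle {
 public:
  MOCK_METHOD0(PenDown, void());
  MOCK_METHOD0(PenUp, void());
};

TEST(UninterestingCallDemo, Reactions) {
  ::testing::NiceMock<MockTurtle> nice;      // registers kAllow
  ::testing::NaggyMock<MockTurtle> naggy;    // registers kWarn (the default)
  ::testing::StrictMock<MockTurtle> strict;  // registers kFail
  nice.PenDown();   // silently allowed
  naggy.PenDown();  // prints an "uninteresting call" warning
  // strict.PenDown();  // would be reported as a test failure
}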
+
+// Tells Google Mock the given mock object is being destroyed and its
+// entry in the call-reaction table should be removed.
+void Mock::UnregisterCallReaction(const void* mock_obj)
+    GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex) {
+  internal::MutexLock l(&internal::g_gmock_mutex);
+  g_uninteresting_call_reaction.erase(mock_obj);
+}
+
+// Returns the reaction Google Mock will have on uninteresting calls
+// made on the given mock object.
+internal::CallReaction Mock::GetReactionOnUninterestingCalls(
+    const void* mock_obj)
+        GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex) {
+  internal::MutexLock l(&internal::g_gmock_mutex);
+  return (g_uninteresting_call_reaction.count(mock_obj) == 0) ?
+      internal::kDefault : g_uninteresting_call_reaction[mock_obj];
+}
+
+// Tells Google Mock to ignore mock_obj when checking for leaked mock
+// objects.
+void Mock::AllowLeak(const void* mock_obj)
+    GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex) {
+  internal::MutexLock l(&internal::g_gmock_mutex);
+  g_mock_object_registry.states()[mock_obj].leakable = true;
+}
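A brief usage sketch, reusing the hypothetical MockTurtle from the sketch above. Without the AllowLeak() call, the registry destructor above would report the object at program exit and call _exit(1):

TEST(LeakDemo, DeliberatelyLeakedMock) {
  MockTurtle* turtle = new MockTurtle;  // intentionally never deleted
  EXPECT_CALL(*turtle, PenDown()).Times(::testing::AnyNumber());
  ::testing::Mock::AllowLeak(turtle);   // marks the registry entry as leakable
  turtle->PenDown();
}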
+
+// Verifies and clears all expectations on the given mock object.  If
+// the expectations aren't satisfied, generates one or more Google
+// Test non-fatal failures and returns false.
+bool Mock::VerifyAndClearExpectations(void* mock_obj)
+    GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex) {
+  internal::MutexLock l(&internal::g_gmock_mutex);
+  return VerifyAndClearExpectationsLocked(mock_obj);
+}
+
+// Verifies all expectations on the given mock object and clears its
+// default actions and expectations.  Returns true iff the
+// verification was successful.
+bool Mock::VerifyAndClear(void* mock_obj)
+    GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex) {
+  internal::MutexLock l(&internal::g_gmock_mutex);
+  ClearDefaultActionsLocked(mock_obj);
+  return VerifyAndClearExpectationsLocked(mock_obj);
+}
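For illustration (again with the hypothetical MockTurtle), a sketch of verifying expectations in the middle of a test instead of waiting for the mock's destructor:

TEST(VerifyDemo, MidTestVerification) {
  MockTurtle turtle;
  EXPECT_CALL(turtle, PenDown()).Times(1);
  turtle.PenDown();
  // Emits non-fatal failures and returns false if the expectation above is
  // unsatisfied; on success the expectation is cleared.
  EXPECT_TRUE(::testing::Mock::VerifyAndClearExpectations(&turtle));
  // VerifyAndClear() additionally removes ON_CALL() default actions.
  EXPECT_TRUE(::testing::Mock::VerifyAndClear(&turtle));
}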
+
+// Verifies and clears all expectations on the given mock object.  If
+// the expectations aren't satisfied, generates one or more Google
+// Test non-fatal failures and returns false.
+bool Mock::VerifyAndClearExpectationsLocked(void* mock_obj)
+    GTEST_EXCLUSIVE_LOCK_REQUIRED_(internal::g_gmock_mutex) {
+  internal::g_gmock_mutex.AssertHeld();
+  if (g_mock_object_registry.states().count(mock_obj) == 0) {
+    // No EXPECT_CALL() was set on the given mock object.
+    return true;
+  }
+
+  // Verifies and clears the expectations on each mock method in the
+  // given mock object.
+  bool expectations_met = true;
+  FunctionMockers& mockers =
+      g_mock_object_registry.states()[mock_obj].function_mockers;
+  for (FunctionMockers::const_iterator it = mockers.begin();
+       it != mockers.end(); ++it) {
+    if (!(*it)->VerifyAndClearExpectationsLocked()) {
+      expectations_met = false;
+    }
+  }
+
+  // We don't clear the content of mockers, as they may still be
+  // needed by ClearDefaultActionsLocked().
+  return expectations_met;
+}
+
+// Registers a mock object and a mock method it owns.
+void Mock::Register(const void* mock_obj,
+                    internal::UntypedFunctionMockerBase* mocker)
+    GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex) {
+  internal::MutexLock l(&internal::g_gmock_mutex);
+  g_mock_object_registry.states()[mock_obj].function_mockers.insert(mocker);
+}
+
+// Tells Google Mock where in the source code mock_obj is used in an
+// ON_CALL or EXPECT_CALL.  In case mock_obj is leaked, this
+// information helps the user identify which object it is.
+void Mock::RegisterUseByOnCallOrExpectCall(const void* mock_obj,
+                                           const char* file, int line)
+    GTEST_LOCK_EXCLUDED_(internal::g_gmock_mutex) {
+  internal::MutexLock l(&internal::g_gmock_mutex);
+  MockObjectState& state = g_mock_object_registry.states()[mock_obj];
+  if (state.first_used_file == NULL) {
+    state.first_used_file = file;
+    state.first_used_line = line;
+    const TestInfo* const test_info =
+        UnitTest::GetInstance()->current_test_info();
+    if (test_info != NULL) {
+      // TODO(wan@google.com): record the test case name when the
+      // ON_CALL or EXPECT_CALL is invoked from SetUpTestCase() or
+      // TearDownTestCase().
+      state.first_used_test_case = test_info->test_case_name();
+      state.first_used_test = test_info->name();
+    }
+  }
+}
+
+// Unregisters a mock method; removes the owning mock object from the
+// registry when the last mock method associated with it has been
+// unregistered.  This is called only in the destructor of
+// FunctionMockerBase.
+void Mock::UnregisterLocked(internal::UntypedFunctionMockerBase* mocker)
+    GTEST_EXCLUSIVE_LOCK_REQUIRED_(internal::g_gmock_mutex) {
+  internal::g_gmock_mutex.AssertHeld();
+  for (MockObjectRegistry::StateMap::iterator it =
+           g_mock_object_registry.states().begin();
+       it != g_mock_object_registry.states().end(); ++it) {
+    FunctionMockers& mockers = it->second.function_mockers;
+    if (mockers.erase(mocker) > 0) {
+      // mocker was in mockers and has just been removed.
+      if (mockers.empty()) {
+        g_mock_object_registry.states().erase(it);
+      }
+      return;
+    }
+  }
+}
+
+// Clears all ON_CALL()s set on the given mock object.
+void Mock::ClearDefaultActionsLocked(void* mock_obj)
+    GTEST_EXCLUSIVE_LOCK_REQUIRED_(internal::g_gmock_mutex) {
+  internal::g_gmock_mutex.AssertHeld();
+
+  if (g_mock_object_registry.states().count(mock_obj) == 0) {
+    // No ON_CALL() was set on the given mock object.
+    return;
+  }
+
+  // Clears the default actions for each mock method in the given mock
+  // object.
+  FunctionMockers& mockers =
+      g_mock_object_registry.states()[mock_obj].function_mockers;
+  for (FunctionMockers::const_iterator it = mockers.begin();
+       it != mockers.end(); ++it) {
+    (*it)->ClearDefaultActionsLocked();
+  }
+
+  // We don't clear the content of mockers, as they may still be
+  // needed by VerifyAndClearExpectationsLocked().
+}
+
+Expectation::Expectation() {}
+
+Expectation::Expectation(
+    const internal::linked_ptr<internal::ExpectationBase>& an_expectation_base)
+    : expectation_base_(an_expectation_base) {}
+
+Expectation::~Expectation() {}
+
+// Adds an expectation to a sequence.
+void Sequence::AddExpectation(const Expectation& expectation) const {
+  if (*last_expectation_ != expectation) {
+    if (last_expectation_->expectation_base() != NULL) {
+      expectation.expectation_base()->immediate_prerequisites_
+          += *last_expectation_;
+    }
+    *last_expectation_ = expectation;
+  }
+}
+
+// Creates the implicit sequence if there isn't one.
+InSequence::InSequence() {
+  if (internal::g_gmock_implicit_sequence.get() == NULL) {
+    internal::g_gmock_implicit_sequence.set(new Sequence);
+    sequence_created_ = true;
+  } else {
+    sequence_created_ = false;
+  }
+}
+
+// Deletes the implicit sequence if it was created by the constructor
+// of this object.
+InSequence::~InSequence() {
+  if (sequence_created_) {
+    delete internal::g_gmock_implicit_sequence.get();
+    internal::g_gmock_implicit_sequence.set(NULL);
+  }
+}
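As a usage sketch (still assuming the hypothetical MockTurtle), the RAII object above is what makes the following expectations order-sensitive:

using ::testing::InSequence;

TEST(SequenceDemo, OrderedExpectations) {
  MockTurtle turtle;
  {
    InSequence s;  // the constructor above creates the implicit sequence
    EXPECT_CALL(turtle, PenDown());
    EXPECT_CALL(turtle, PenUp());
  }  // the destructor above deletes it again
  // The calls must now occur in the order the expectations were declared.
  turtle.PenDown();
  turtle.PenUp();
}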
+
+}  // namespace testing
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+
+namespace testing {
+
+// TODO(wan@google.com): support using environment variables to
+// control the flag values, like what Google Test does.
+
+GMOCK_DEFINE_bool_(catch_leaked_mocks, true,
+                   "true iff Google Mock should report leaked mock objects "
+                   "as failures.");
+
+GMOCK_DEFINE_string_(verbose, internal::kWarningVerbosity,
+                     "Controls how verbose Google Mock's output is."
+                     "  Valid values:\n"
+                     "  info    - prints all messages.\n"
+                     "  warning - prints warnings and errors.\n"
+                     "  error   - prints errors only.");
+
+namespace internal {
+
+// Parses a string as a command line flag.  The string should have the
+// format "--gmock_flag=value".  When def_optional is true, the
+// "=value" part can be omitted.
+//
+// Returns the value of the flag, or NULL if the parsing failed.
+static const char* ParseGoogleMockFlagValue(const char* str,
+                                            const char* flag,
+                                            bool def_optional) {
+  // str and flag must not be NULL.
+  if (str == NULL || flag == NULL) return NULL;
+
+  // The flag must start with "--gmock_".
+  const std::string flag_str = std::string("--gmock_") + flag;
+  const size_t flag_len = flag_str.length();
+  if (strncmp(str, flag_str.c_str(), flag_len) != 0) return NULL;
+
+  // Skips the flag name.
+  const char* flag_end = str + flag_len;
+
+  // When def_optional is true, it's OK to not have a "=value" part.
+  if (def_optional && (flag_end[0] == '\0')) {
+    return flag_end;
+  }
+
+  // If def_optional is true and there are more characters after the
+  // flag name, or if def_optional is false, there must be a '=' after
+  // the flag name.
+  if (flag_end[0] != '=') return NULL;
+
+  // Returns the string after "=".
+  return flag_end + 1;
+}
+
+// Parses a string for a Google Mock bool flag, in the form of
+// "--gmock_flag=value".
+//
+// On success, stores the value of the flag in *value, and returns
+// true.  On failure, returns false without changing *value.
+static bool ParseGoogleMockBoolFlag(const char* str, const char* flag,
+                                    bool* value) {
+  // Gets the value of the flag as a string.
+  const char* const value_str = ParseGoogleMockFlagValue(str, flag, true);
+
+  // Aborts if the parsing failed.
+  if (value_str == NULL) return false;
+
+  // Converts the string value to a bool.
+  *value = !(*value_str == '0' || *value_str == 'f' || *value_str == 'F');
+  return true;
+}
+
+// Parses a string for a Google Mock string flag, in the form of
+// "--gmock_flag=value".
+//
+// On success, stores the value of the flag in *value, and returns
+// true.  On failure, returns false without changing *value.
+template <typename String>
+static bool ParseGoogleMockStringFlag(const char* str, const char* flag,
+                                      String* value) {
+  // Gets the value of the flag as a string.
+  const char* const value_str = ParseGoogleMockFlagValue(str, flag, false);
+
+  // Aborts if the parsing failed.
+  if (value_str == NULL) return false;
+
+  // Sets *value to the value of the flag.
+  *value = value_str;
+  return true;
+}
+
+// The internal implementation of InitGoogleMock().
+//
+// The type parameter CharType can be instantiated to either char or
+// wchar_t.
+template <typename CharType>
+void InitGoogleMockImpl(int* argc, CharType** argv) {
+  // Makes sure Google Test is initialized.  InitGoogleTest() is
+  // idempotent, so it's fine if the user has already called it.
+  InitGoogleTest(argc, argv);
+  if (*argc <= 0) return;
+
+  for (int i = 1; i != *argc; i++) {
+    const std::string arg_string = StreamableToString(argv[i]);
+    const char* const arg = arg_string.c_str();
+
+    // Do we see a Google Mock flag?
+    if (ParseGoogleMockBoolFlag(arg, "catch_leaked_mocks",
+                                &GMOCK_FLAG(catch_leaked_mocks)) ||
+        ParseGoogleMockStringFlag(arg, "verbose", &GMOCK_FLAG(verbose))) {
+      // Yes.  Shift the remainder of the argv list left by one.  Note
+      // that argv has (*argc + 1) elements, the last one always being
+      // NULL.  The following loop moves the trailing NULL element as
+      // well.
+      for (int j = i; j != *argc; j++) {
+        argv[j] = argv[j + 1];
+      }
+
+      // Decrements the argument count.
+      (*argc)--;
+
+      // We also need to decrement the iterator as we just removed
+      // an element.
+      i--;
+    }
+  }
+}
+
+}  // namespace internal
+
+// Initializes Google Mock.  This must be called before running the
+// tests.  In particular, it parses a command line for the flags that
+// Google Mock recognizes.  Whenever a Google Mock flag is seen, it is
+// removed from argv, and *argc is decremented.
+//
+// No value is returned.  Instead, the Google Mock flag variables are
+// updated.
+//
+// Since Google Test is needed for Google Mock to work, this function
+// also initializes Google Test and parses its flags, if that hasn't
+// been done.
+GTEST_API_ void InitGoogleMock(int* argc, char** argv) {
+  internal::InitGoogleMockImpl(argc, argv);
+}
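A minimal sketch of the flag handling described above; the binary name and the extra flags are made up for illustration:

#include "gmock/gmock.h"
#include "gtest/gtest.h"

// Hypothetical invocation:
//   ./unit_test --gmock_verbose=error --gtest_repeat=2 --my_flag
int main(int argc, char** argv) {
  // argc == 4 at this point.
  testing::InitGoogleMock(&argc, argv);
  // --gmock_verbose has been recognized and removed here, --gtest_repeat was
  // consumed by InitGoogleTest(), and --my_flag is left untouched, so argv is
  // now {"./unit_test", "--my_flag", NULL} and argc == 2.
  return RUN_ALL_TESTS();
}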
+
+// This overloaded version can be used in Windows programs compiled in
+// UNICODE mode.
+GTEST_API_ void InitGoogleMock(int* argc, wchar_t** argv) {
+  internal::InitGoogleMockImpl(argc, argv);
+}
+
+}  // namespace testing
diff --git a/internal/ceres/gmock_main.cc b/internal/ceres/gmock_main.cc
new file mode 100644
index 0000000..5800dc0
--- /dev/null
+++ b/internal/ceres/gmock_main.cc
@@ -0,0 +1,69 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+#include <iostream>
+#include "gflags/gflags.h"
+#include "glog/logging.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+// NOTE(keir): This flag is normally part of gtest within Google but isn't in
+// the open source Google Test, since it is build-system dependent. However,
+// Ceres needs it for its tests, so the flag is added here.
+DEFINE_string(test_srcdir, "", "The location of the source code.");
+
+// The MS C++ compiler/linker has a bug on Windows (not on Windows CE) that
+// causes a link error when _tmain is defined in a static library and UNICODE
+// is enabled. For this reason, main() is used instead of _tmain on Windows.
+// See the following link to track the current status of this bug:
+// http://connect.microsoft.com/VisualStudio/feedback/ViewFeedback.aspx?FeedbackID=394464  // NOLINT
+#if GTEST_OS_WINDOWS_MOBILE
+# include <tchar.h>  // NOLINT
+
+GTEST_API_ int _tmain(int argc, TCHAR** argv) {
+#else
+GTEST_API_ int main(int argc, char** argv) {
+#endif  // GTEST_OS_WINDOWS_MOBILE
+  std::cout << "Running main() from gmock_main.cc\n";
+  google::InitGoogleLogging(argv[0]);
+  // Since Google Mock depends on Google Test, InitGoogleMock() is
+  // also responsible for initializing Google Test.  Therefore there's
+  // no need for calling testing::InitGoogleTest() separately.
+  testing::InitGoogleMock(&argc, argv);
+  // On Windows, gtest passes additional non-gflags command line flags to
+  // death-tests, specifically --gtest_filter & --gtest_internal_run_death_test.
+  // So that these flags (which gflags does not know about) do not trigger an
+  // error in gflags, InitGoogleTest() (called by InitGoogleMock()) must be
+  // called before ParseCommandLineFlags() to handle & remove them before
+  // gflags parses the remaining flags.
+  CERES_GFLAGS_NAMESPACE::ParseCommandLineFlags(&argc, &argv, true);
+  return RUN_ALL_TESTS();
+}
diff --git a/internal/ceres/gradient_checker.cc b/internal/ceres/gradient_checker.cc
new file mode 100644
index 0000000..411a67f
--- /dev/null
+++ b/internal/ceres/gradient_checker.cc
@@ -0,0 +1,277 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2016 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: wjr@google.com (William Rucklidge),
+//          keir@google.com (Keir Mierle),
+//          dgossow@google.com (David Gossow)
+
+#include "ceres/gradient_checker.h"
+
+#include <algorithm>
+#include <cmath>
+#include <cstdint>
+#include <numeric>
+#include <string>
+#include <vector>
+
+#include "ceres/is_close.h"
+#include "ceres/stringprintf.h"
+#include "ceres/types.h"
+
+namespace ceres {
+
+using internal::IsClose;
+using internal::StringAppendF;
+using internal::StringPrintf;
+using std::string;
+using std::vector;
+
+namespace {
+// Evaluate the cost function and transform the returned Jacobians to
+// the local space of the respective local parameterizations.
+bool EvaluateCostFunction(
+    const ceres::CostFunction* function,
+    double const* const * parameters,
+    const std::vector<const ceres::LocalParameterization*>&
+        local_parameterizations,
+    Vector* residuals,
+    std::vector<Matrix>* jacobians,
+    std::vector<Matrix>* local_jacobians) {
+  CHECK(residuals != nullptr);
+  CHECK(jacobians != nullptr);
+  CHECK(local_jacobians != nullptr);
+
+  const vector<int32_t>& block_sizes = function->parameter_block_sizes();
+  const int num_parameter_blocks = block_sizes.size();
+
+  // Allocate Jacobian matrices in local space.
+  local_jacobians->resize(num_parameter_blocks);
+  vector<double*> local_jacobian_data(num_parameter_blocks);
+  for (int i = 0; i < num_parameter_blocks; ++i) {
+    int block_size = block_sizes.at(i);
+    if (local_parameterizations.at(i) != NULL) {
+      block_size = local_parameterizations.at(i)->LocalSize();
+    }
+    local_jacobians->at(i).resize(function->num_residuals(), block_size);
+    local_jacobians->at(i).setZero();
+    local_jacobian_data.at(i) = local_jacobians->at(i).data();
+  }
+
+  // Allocate Jacobian matrices in global space.
+  jacobians->resize(num_parameter_blocks);
+  vector<double*> jacobian_data(num_parameter_blocks);
+  for (int i = 0; i < num_parameter_blocks; ++i) {
+    jacobians->at(i).resize(function->num_residuals(), block_sizes.at(i));
+    jacobians->at(i).setZero();
+    jacobian_data.at(i) = jacobians->at(i).data();
+  }
+
+  // Compute residuals & jacobians.
+  CHECK_NE(0, function->num_residuals());
+  residuals->resize(function->num_residuals());
+  residuals->setZero();
+  if (!function->Evaluate(parameters, residuals->data(),
+                          jacobian_data.data())) {
+    return false;
+  }
+
+  // Convert Jacobians from global to local space.
+  for (size_t i = 0; i < local_jacobians->size(); ++i) {
+    if (local_parameterizations.at(i) == NULL) {
+      local_jacobians->at(i) = jacobians->at(i);
+    } else {
+      int global_size = local_parameterizations.at(i)->GlobalSize();
+      int local_size = local_parameterizations.at(i)->LocalSize();
+      CHECK_EQ(jacobians->at(i).cols(), global_size);
+      Matrix global_J_local(global_size, local_size);
+      local_parameterizations.at(i)->ComputeJacobian(
+          parameters[i], global_J_local.data());
+      local_jacobians->at(i).noalias() = jacobians->at(i) * global_J_local;
+    }
+  }
+  return true;
+}
+} // namespace
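For reference, the global-to-local conversion in the loop above is just the chain rule through the local parameterization; ComputeJacobian() writes the GlobalSize x LocalSize matrix \(\partial \operatorname{Plus}(x, \delta) / \partial \delta\) evaluated at \(\delta = 0\) into global_J_local, so

\[
  J_{\text{local}}
  = \frac{\partial r}{\partial \delta}
  = \frac{\partial r}{\partial x}\,
    \frac{\partial \operatorname{Plus}(x, \delta)}{\partial \delta}\Bigg|_{\delta = 0}
  = J_{\text{global}} \cdot \texttt{global\_J\_local},
\]

which is exactly the product jacobians->at(i) * global_J_local computed above.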
+
+GradientChecker::GradientChecker(
+      const CostFunction* function,
+      const vector<const LocalParameterization*>* local_parameterizations,
+      const NumericDiffOptions& options) :
+        function_(function) {
+  CHECK(function != nullptr);
+  if (local_parameterizations != NULL) {
+    local_parameterizations_ = *local_parameterizations;
+  } else {
+    local_parameterizations_.resize(function->parameter_block_sizes().size(),
+                                    NULL);
+  }
+  DynamicNumericDiffCostFunction<CostFunction, CENTRAL>*
+      finite_diff_cost_function =
+      new DynamicNumericDiffCostFunction<CostFunction, CENTRAL>(
+          function, DO_NOT_TAKE_OWNERSHIP, options);
+  finite_diff_cost_function_.reset(finite_diff_cost_function);
+
+  const vector<int32_t>& parameter_block_sizes =
+      function->parameter_block_sizes();
+  const int num_parameter_blocks = parameter_block_sizes.size();
+  for (int i = 0; i < num_parameter_blocks; ++i) {
+    finite_diff_cost_function->AddParameterBlock(parameter_block_sizes[i]);
+  }
+  finite_diff_cost_function->SetNumResiduals(function->num_residuals());
+}
+
+bool GradientChecker::Probe(double const* const * parameters,
+                            double relative_precision,
+                            ProbeResults* results_param) const {
+  int num_residuals = function_->num_residuals();
+
+  // Make sure that we have a place to store results, whether or not the
+  // user has provided an output argument.
+  ProbeResults* results;
+  ProbeResults results_local;
+  if (results_param != NULL) {
+    results = results_param;
+    results->residuals.resize(0);
+    results->jacobians.clear();
+    results->numeric_jacobians.clear();
+    results->local_jacobians.clear();
+    results->local_numeric_jacobians.clear();
+    results->error_log.clear();
+  } else {
+    results = &results_local;
+  }
+  results->maximum_relative_error = 0.0;
+  results->return_value = true;
+
+  // Evaluate the derivative using the user supplied code.
+  vector<Matrix>& jacobians = results->jacobians;
+  vector<Matrix>& local_jacobians = results->local_jacobians;
+  if (!EvaluateCostFunction(function_, parameters, local_parameterizations_,
+                       &results->residuals, &jacobians, &local_jacobians)) {
+    results->error_log = "Function evaluation with Jacobians failed.";
+    results->return_value = false;
+  }
+
+  // Evaluate the derivative using numeric derivatives.
+  vector<Matrix>& numeric_jacobians = results->numeric_jacobians;
+  vector<Matrix>& local_numeric_jacobians = results->local_numeric_jacobians;
+  Vector finite_diff_residuals;
+  if (!EvaluateCostFunction(finite_diff_cost_function_.get(), parameters,
+                            local_parameterizations_, &finite_diff_residuals,
+                            &numeric_jacobians, &local_numeric_jacobians)) {
+    results->error_log += "\nFunction evaluation with numerical "
+        "differentiation failed.";
+    results->return_value = false;
+  }
+
+  if (!results->return_value) {
+    return false;
+  }
+
+  for (int i = 0; i < num_residuals; ++i) {
+    if (!IsClose(
+        results->residuals[i],
+        finite_diff_residuals[i],
+        relative_precision,
+        NULL,
+        NULL)) {
+      results->error_log = "Function evaluation with and without Jacobians "
+          "resulted in different residuals.";
+      LOG(INFO) << results->residuals.transpose();
+      LOG(INFO) << finite_diff_residuals.transpose();
+      return false;
+    }
+  }
+
+  // See if any elements have relative error larger than the threshold.
+  int num_bad_jacobian_components = 0;
+  double& worst_relative_error = results->maximum_relative_error;
+  worst_relative_error = 0;
+
+  // Accumulate the error message for all the jacobians, since it won't get
+  // output if there are no bad jacobian components.
+  string error_log;
+  for (int k = 0; k < function_->parameter_block_sizes().size(); k++) {
+    StringAppendF(&error_log,
+                  "========== "
+                  "Jacobian for block %d: (%ld by %ld) "
+                  "==========\n",
+                  k,
+                  static_cast<long>(local_jacobians[k].rows()),
+                  static_cast<long>(local_jacobians[k].cols()));
+    // The funny spacing creates appropriately aligned column headers.
+    error_log +=
+        " block  row  col        user dx/dy    num diff dx/dy         "
+        "abs error    relative error         parameter          residual\n";
+
+    for (int i = 0; i < local_jacobians[k].rows(); i++) {
+      for (int j = 0; j < local_jacobians[k].cols(); j++) {
+        double term_jacobian = local_jacobians[k](i, j);
+        double finite_jacobian = local_numeric_jacobians[k](i, j);
+        double relative_error, absolute_error;
+        bool bad_jacobian_entry =
+            !IsClose(term_jacobian,
+                     finite_jacobian,
+                     relative_precision,
+                     &relative_error,
+                     &absolute_error);
+        worst_relative_error = std::max(worst_relative_error, relative_error);
+
+        StringAppendF(&error_log,
+                      "%6d %4d %4d %17g %17g %17g %17g %17g %17g",
+                      k, i, j,
+                      term_jacobian, finite_jacobian,
+                      absolute_error, relative_error,
+                      parameters[k][j],
+                      results->residuals[i]);
+
+        if (bad_jacobian_entry) {
+          num_bad_jacobian_components++;
+          StringAppendF(
+              &error_log,
+              " ------ (%d,%d,%d) Relative error worse than %g",
+              k, i, j, relative_precision);
+        }
+        error_log += "\n";
+      }
+    }
+  }
+
+  // Since there were some bad errors, dump comprehensive debug info.
+  if (num_bad_jacobian_components) {
+    string header = StringPrintf("\nDetected %d bad Jacobian component(s). "
+        "Worst relative error was %g.\n",
+        num_bad_jacobian_components,
+        worst_relative_error);
+    results->error_log = header + "\n" + error_log;
+    return false;
+  }
+  return true;
+}
+
+}  // namespace ceres
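A minimal usage sketch of the class defined above, assuming my_cost_function is some user-supplied ceres::CostFunction with a single 3-dimensional parameter block and no local parameterization:

#include "ceres/gradient_checker.h"
#include "glog/logging.h"

void CheckMyCostFunction(const ceres::CostFunction& my_cost_function) {
  ceres::NumericDiffOptions numeric_diff_options;
  ceres::GradientChecker checker(&my_cost_function,
                                 /* local_parameterizations = */ NULL,
                                 numeric_diff_options);
  double x[3] = {1.0, 2.0, 3.0};
  double* parameter_blocks[] = {x};
  ceres::GradientChecker::ProbeResults results;
  if (!checker.Probe(parameter_blocks, 1e-9, &results)) {
    LOG(ERROR) << "Analytic and numeric Jacobians disagree:\n"
               << results.error_log;
  }
}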
diff --git a/internal/ceres/gradient_checker_test.cc b/internal/ceres/gradient_checker_test.cc
new file mode 100644
index 0000000..92d7b26
--- /dev/null
+++ b/internal/ceres/gradient_checker_test.cc
@@ -0,0 +1,586 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2016 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wjr@google.com (William Rucklidge)
+//
+// This file contains tests for the GradientChecker class.
+
+#include "ceres/gradient_checker.h"
+
+#include <cmath>
+#include <cstdlib>
+#include <vector>
+
+#include "ceres/cost_function.h"
+#include "ceres/problem.h"
+#include "ceres/random.h"
+#include "ceres/solver.h"
+#include "ceres/test_util.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+using std::vector;
+
+// We pick a (non-quadratic) function whose derivatives are easy:
+//
+//    f = exp(- a' x).
+//   df = - f a.
+//
+// where 'a' is a vector of the same size as 'x'. In the block
+// version, they are both block vectors, of course.
+class GoodTestTerm : public CostFunction {
+ public:
+  GoodTestTerm(int arity, int const* dim) : arity_(arity), return_value_(true) {
+    // Make 'arity' random vectors.
+    a_.resize(arity_);
+    for (int j = 0; j < arity_; ++j) {
+      a_[j].resize(dim[j]);
+      for (int u = 0; u < dim[j]; ++u) {
+        a_[j][u] = 2.0 * RandDouble() - 1.0;
+      }
+    }
+
+    for (int i = 0; i < arity_; i++) {
+      mutable_parameter_block_sizes()->push_back(dim[i]);
+    }
+    set_num_residuals(1);
+  }
+
+  bool Evaluate(double const* const* parameters,
+                double* residuals,
+                double** jacobians) const {
+    if (!return_value_) {
+      return false;
+    }
+    // Compute a . x.
+    double ax = 0;
+    for (int j = 0; j < arity_; ++j) {
+      for (int u = 0; u < parameter_block_sizes()[j]; ++u) {
+        ax += a_[j][u] * parameters[j][u];
+      }
+    }
+
+    // This is the cost, but also appears as a factor
+    // in the derivatives.
+    double f = *residuals = exp(-ax);
+
+    // Accumulate 1st order derivatives.
+    if (jacobians) {
+      for (int j = 0; j < arity_; ++j) {
+        if (jacobians[j]) {
+          for (int u = 0; u < parameter_block_sizes()[j]; ++u) {
+            // See comments before class.
+            jacobians[j][u] = -f * a_[j][u];
+          }
+        }
+      }
+    }
+
+    return true;
+  }
+
+  void SetReturnValue(bool return_value) { return_value_ = return_value; }
+
+ private:
+  int arity_;
+  bool return_value_;
+  vector<vector<double>> a_;  // our vectors.
+};
+
+class BadTestTerm : public CostFunction {
+ public:
+  BadTestTerm(int arity, int const* dim) : arity_(arity) {
+    // Make 'arity' random vectors.
+    a_.resize(arity_);
+    for (int j = 0; j < arity_; ++j) {
+      a_[j].resize(dim[j]);
+      for (int u = 0; u < dim[j]; ++u) {
+        a_[j][u] = 2.0 * RandDouble() - 1.0;
+      }
+    }
+
+    for (int i = 0; i < arity_; i++) {
+      mutable_parameter_block_sizes()->push_back(dim[i]);
+    }
+    set_num_residuals(1);
+  }
+
+  bool Evaluate(double const* const* parameters,
+                double* residuals,
+                double** jacobians) const {
+    // Compute a . x.
+    double ax = 0;
+    for (int j = 0; j < arity_; ++j) {
+      for (int u = 0; u < parameter_block_sizes()[j]; ++u) {
+        ax += a_[j][u] * parameters[j][u];
+      }
+    }
+
+    // This is the cost, but also appears as a factor
+    // in the derivatives.
+    double f = *residuals = exp(-ax);
+
+    // Accumulate 1st order derivatives.
+    if (jacobians) {
+      for (int j = 0; j < arity_; ++j) {
+        if (jacobians[j]) {
+          for (int u = 0; u < parameter_block_sizes()[j]; ++u) {
+            // See comments before class.
+            jacobians[j][u] = -f * a_[j][u] + 0.001;
+          }
+        }
+      }
+    }
+
+    return true;
+  }
+
+ private:
+  int arity_;
+  vector<vector<double>> a_;  // our vectors.
+};
+
+const double kTolerance = 1e-6;
+
+void CheckDimensions(const GradientChecker::ProbeResults& results,
+                     const std::vector<int>& parameter_sizes,
+                     const std::vector<int>& local_parameter_sizes,
+                     int residual_size) {
+  CHECK_EQ(parameter_sizes.size(), local_parameter_sizes.size());
+  int num_parameters = parameter_sizes.size();
+  ASSERT_EQ(residual_size, results.residuals.size());
+  ASSERT_EQ(num_parameters, results.local_jacobians.size());
+  ASSERT_EQ(num_parameters, results.local_numeric_jacobians.size());
+  ASSERT_EQ(num_parameters, results.jacobians.size());
+  ASSERT_EQ(num_parameters, results.numeric_jacobians.size());
+  for (int i = 0; i < num_parameters; ++i) {
+    EXPECT_EQ(residual_size, results.local_jacobians.at(i).rows());
+    EXPECT_EQ(local_parameter_sizes[i], results.local_jacobians.at(i).cols());
+    EXPECT_EQ(residual_size, results.local_numeric_jacobians.at(i).rows());
+    EXPECT_EQ(local_parameter_sizes[i],
+              results.local_numeric_jacobians.at(i).cols());
+    EXPECT_EQ(residual_size, results.jacobians.at(i).rows());
+    EXPECT_EQ(parameter_sizes[i], results.jacobians.at(i).cols());
+    EXPECT_EQ(residual_size, results.numeric_jacobians.at(i).rows());
+    EXPECT_EQ(parameter_sizes[i], results.numeric_jacobians.at(i).cols());
+  }
+}
+
+TEST(GradientChecker, SmokeTest) {
+  srand(5);
+
+  // Test with 3 blocks of size 2, 3 and 4.
+  int const num_parameters = 3;
+  std::vector<int> parameter_sizes(3);
+  parameter_sizes[0] = 2;
+  parameter_sizes[1] = 3;
+  parameter_sizes[2] = 4;
+
+  // Make a random set of blocks.
+  FixedArray<double*> parameters(num_parameters);
+  for (int j = 0; j < num_parameters; ++j) {
+    parameters[j] = new double[parameter_sizes[j]];
+    for (int u = 0; u < parameter_sizes[j]; ++u) {
+      parameters[j][u] = 2.0 * RandDouble() - 1.0;
+    }
+  }
+
+  NumericDiffOptions numeric_diff_options;
+  GradientChecker::ProbeResults results;
+
+  // Test that Probe returns true for correct Jacobians.
+  GoodTestTerm good_term(num_parameters, parameter_sizes.data());
+  GradientChecker good_gradient_checker(&good_term, NULL, numeric_diff_options);
+  EXPECT_TRUE(good_gradient_checker.Probe(parameters.get(), kTolerance, NULL));
+  EXPECT_TRUE(
+      good_gradient_checker.Probe(parameters.get(), kTolerance, &results))
+      << results.error_log;
+
+  // Check that results contain sensible data.
+  ASSERT_EQ(results.return_value, true);
+  ASSERT_EQ(results.residuals.size(), 1);
+  CheckDimensions(results, parameter_sizes, parameter_sizes, 1);
+  EXPECT_GE(results.maximum_relative_error, 0.0);
+  EXPECT_TRUE(results.error_log.empty());
+
+  // Test that if the cost function returns false, Probe returns false as well.
+  good_term.SetReturnValue(false);
+  EXPECT_FALSE(good_gradient_checker.Probe(parameters.get(), kTolerance, NULL));
+  EXPECT_FALSE(
+      good_gradient_checker.Probe(parameters.get(), kTolerance, &results))
+      << results.error_log;
+
+  // Check that results contain sensible data.
+  ASSERT_EQ(results.return_value, false);
+  ASSERT_EQ(results.residuals.size(), 1);
+  CheckDimensions(results, parameter_sizes, parameter_sizes, 1);
+  for (int i = 0; i < num_parameters; ++i) {
+    EXPECT_EQ(results.local_jacobians.at(i).norm(), 0);
+    EXPECT_EQ(results.local_numeric_jacobians.at(i).norm(), 0);
+  }
+  EXPECT_EQ(results.maximum_relative_error, 0.0);
+  EXPECT_FALSE(results.error_log.empty());
+
+  // Test that Probe returns false for incorrect Jacobians.
+  BadTestTerm bad_term(num_parameters, parameter_sizes.data());
+  GradientChecker bad_gradient_checker(&bad_term, NULL, numeric_diff_options);
+  EXPECT_FALSE(bad_gradient_checker.Probe(parameters.get(), kTolerance, NULL));
+  EXPECT_FALSE(
+      bad_gradient_checker.Probe(parameters.get(), kTolerance, &results));
+
+  // Check that results contain sensible data.
+  ASSERT_EQ(results.return_value, true);
+  ASSERT_EQ(results.residuals.size(), 1);
+  CheckDimensions(results, parameter_sizes, parameter_sizes, 1);
+  EXPECT_GT(results.maximum_relative_error, kTolerance);
+  EXPECT_FALSE(results.error_log.empty());
+
+  // Setting a high threshold should make the test pass.
+  EXPECT_TRUE(bad_gradient_checker.Probe(parameters.get(), 1.0, &results));
+
+  // Check that results contain sensible data.
+  ASSERT_EQ(results.return_value, true);
+  ASSERT_EQ(results.residuals.size(), 1);
+  CheckDimensions(results, parameter_sizes, parameter_sizes, 1);
+  EXPECT_GT(results.maximum_relative_error, 0.0);
+  EXPECT_TRUE(results.error_log.empty());
+
+  for (int j = 0; j < num_parameters; j++) {
+    delete[] parameters[j];
+  }
+}
+
+/**
+ * Helper cost function that multiplies the parameters by the given jacobians
+ * and adds a constant offset.
+ */
+class LinearCostFunction : public CostFunction {
+ public:
+  explicit LinearCostFunction(const Vector& residuals_offset)
+      : residuals_offset_(residuals_offset) {
+    set_num_residuals(residuals_offset_.size());
+  }
+
+  virtual bool Evaluate(double const* const* parameter_ptrs,
+                        double* residuals_ptr,
+                        double** residual_J_params) const {
+    CHECK_GE(residual_J_params_.size(), 1u);
+    VectorRef residuals(residuals_ptr, residual_J_params_[0].rows());
+    residuals = residuals_offset_;
+
+    for (size_t i = 0; i < residual_J_params_.size(); ++i) {
+      const Matrix& residual_J_param = residual_J_params_[i];
+      int parameter_size = residual_J_param.cols();
+      ConstVectorRef param(parameter_ptrs[i], parameter_size);
+
+      // Compute residual.
+      residuals += residual_J_param * param;
+
+      // Return Jacobian.
+      if (residual_J_params != NULL && residual_J_params[i] != NULL) {
+        Eigen::Map<Matrix> residual_J_param_out(residual_J_params[i],
+                                                residual_J_param.rows(),
+                                                residual_J_param.cols());
+        if (jacobian_offsets_.count(i) != 0) {
+          residual_J_param_out = residual_J_param + jacobian_offsets_.at(i);
+        } else {
+          residual_J_param_out = residual_J_param;
+        }
+      }
+    }
+    return true;
+  }
+
+  void AddParameter(const Matrix& residual_J_param) {
+    CHECK_EQ(num_residuals(), residual_J_param.rows());
+    residual_J_params_.push_back(residual_J_param);
+    mutable_parameter_block_sizes()->push_back(residual_J_param.cols());
+  }
+
+  /// Add offset to the given Jacobian before returning it from Evaluate(),
+  /// thus introducing an error in the computation.
+  void SetJacobianOffset(size_t index, Matrix offset) {
+    CHECK_LT(index, residual_J_params_.size());
+    CHECK_EQ(residual_J_params_[index].rows(), offset.rows());
+    CHECK_EQ(residual_J_params_[index].cols(), offset.cols());
+    jacobian_offsets_[index] = offset;
+  }
+
+ private:
+  std::vector<Matrix> residual_J_params_;
+  std::map<int, Matrix> jacobian_offsets_;
+  Vector residuals_offset_;
+};
+
+/**
+ * Helper local parameterization that multiplies the delta vector by the given
+ * jacobian and adds it to the parameter.
+ */
+class MatrixParameterization : public LocalParameterization {
+ public:
+  virtual bool Plus(const double* x,
+                    const double* delta,
+                    double* x_plus_delta) const {
+    VectorRef(x_plus_delta, GlobalSize()) =
+        ConstVectorRef(x, GlobalSize()) +
+        (global_J_local * ConstVectorRef(delta, LocalSize()));
+    return true;
+  }
+
+  virtual bool ComputeJacobian(const double* /*x*/, double* jacobian) const {
+    MatrixRef(jacobian, GlobalSize(), LocalSize()) = global_J_local;
+    return true;
+  }
+
+  virtual int GlobalSize() const { return global_J_local.rows(); }
+  virtual int LocalSize() const { return global_J_local.cols(); }
+
+  Matrix global_J_local;
+};
+
+// Helper function to compare two Eigen matrices (used in the test below).
+void ExpectMatricesClose(Matrix p, Matrix q, double tolerance) {
+  ASSERT_EQ(p.rows(), q.rows());
+  ASSERT_EQ(p.cols(), q.cols());
+  ExpectArraysClose(p.size(), p.data(), q.data(), tolerance);
+}
+
+TEST(GradientChecker, TestCorrectnessWithLocalParameterizations) {
+  // Create cost function.
+  Eigen::Vector3d residual_offset(100.0, 200.0, 300.0);
+  LinearCostFunction cost_function(residual_offset);
+  Eigen::Matrix<double, 3, 3, Eigen::RowMajor> j0;
+  j0.row(0) << 1.0, 2.0, 3.0;
+  j0.row(1) << 4.0, 5.0, 6.0;
+  j0.row(2) << 7.0, 8.0, 9.0;
+  Eigen::Matrix<double, 3, 2, Eigen::RowMajor> j1;
+  j1.row(0) << 10.0, 11.0;
+  j1.row(1) << 12.0, 13.0;
+  j1.row(2) << 14.0, 15.0;
+
+  Eigen::Vector3d param0(1.0, 2.0, 3.0);
+  Eigen::Vector2d param1(4.0, 5.0);
+
+  cost_function.AddParameter(j0);
+  cost_function.AddParameter(j1);
+
+  std::vector<int> parameter_sizes(2);
+  parameter_sizes[0] = 3;
+  parameter_sizes[1] = 2;
+  std::vector<int> local_parameter_sizes(2);
+  local_parameter_sizes[0] = 2;
+  local_parameter_sizes[1] = 2;
+
+  // Test cost function for correctness.
+  Eigen::Matrix<double, 3, 3, Eigen::RowMajor> j1_out;
+  Eigen::Matrix<double, 3, 2, Eigen::RowMajor> j2_out;
+  Eigen::Vector3d residual;
+  std::vector<const double*> parameters(2);
+  parameters[0] = param0.data();
+  parameters[1] = param1.data();
+  std::vector<double*> jacobians(2);
+  jacobians[0] = j1_out.data();
+  jacobians[1] = j2_out.data();
+  cost_function.Evaluate(parameters.data(), residual.data(), jacobians.data());
+
+  Matrix residual_expected = residual_offset + j0 * param0 + j1 * param1;
+
+  ExpectMatricesClose(j1_out, j0, std::numeric_limits<double>::epsilon());
+  ExpectMatricesClose(j2_out, j1, std::numeric_limits<double>::epsilon());
+  ExpectMatricesClose(residual, residual_expected, kTolerance);
+
+  // Create local parameterization.
+  Eigen::Matrix<double, 3, 2, Eigen::RowMajor> global_J_local;
+  global_J_local.row(0) << 1.5, 2.5;
+  global_J_local.row(1) << 3.5, 4.5;
+  global_J_local.row(2) << 5.5, 6.5;
+
+  MatrixParameterization parameterization;
+  parameterization.global_J_local = global_J_local;
+
+  // Test local parameterization for correctness.
+  Eigen::Vector3d x(7.0, 8.0, 9.0);
+  Eigen::Vector2d delta(10.0, 11.0);
+
+  Eigen::Matrix<double, 3, 2, Eigen::RowMajor> global_J_local_out;
+  parameterization.ComputeJacobian(x.data(), global_J_local_out.data());
+  ExpectMatricesClose(global_J_local_out,
+                      global_J_local,
+                      std::numeric_limits<double>::epsilon());
+
+  Eigen::Vector3d x_plus_delta;
+  parameterization.Plus(x.data(), delta.data(), x_plus_delta.data());
+  Eigen::Vector3d x_plus_delta_expected = x + (global_J_local * delta);
+  ExpectMatricesClose(x_plus_delta, x_plus_delta_expected, kTolerance);
+
+  // Now test GradientChecker.
+  std::vector<const LocalParameterization*> parameterizations(2);
+  parameterizations[0] = &parameterization;
+  parameterizations[1] = NULL;
+  NumericDiffOptions numeric_diff_options;
+  GradientChecker::ProbeResults results;
+  GradientChecker gradient_checker(
+      &cost_function, &parameterizations, numeric_diff_options);
+
+  Problem::Options problem_options;
+  problem_options.cost_function_ownership = DO_NOT_TAKE_OWNERSHIP;
+  problem_options.local_parameterization_ownership = DO_NOT_TAKE_OWNERSHIP;
+  Problem problem(problem_options);
+  Eigen::Vector3d param0_solver;
+  Eigen::Vector2d param1_solver;
+  problem.AddParameterBlock(param0_solver.data(), 3, &parameterization);
+  problem.AddParameterBlock(param1_solver.data(), 2);
+  problem.AddResidualBlock(
+      &cost_function, NULL, param0_solver.data(), param1_solver.data());
+  Solver::Options solver_options;
+  solver_options.check_gradients = true;
+  solver_options.initial_trust_region_radius = 1e10;
+  Solver solver;
+  Solver::Summary summary;
+
+  // First test case: everything is correct.
+  EXPECT_TRUE(gradient_checker.Probe(parameters.data(), kTolerance, NULL));
+  EXPECT_TRUE(gradient_checker.Probe(parameters.data(), kTolerance, &results))
+      << results.error_log;
+
+  // Check that results contain correct data.
+  ASSERT_EQ(results.return_value, true);
+  ExpectMatricesClose(
+      results.residuals, residual, std::numeric_limits<double>::epsilon());
+  CheckDimensions(results, parameter_sizes, local_parameter_sizes, 3);
+  ExpectMatricesClose(
+      results.local_jacobians.at(0), j0 * global_J_local, kTolerance);
+  ExpectMatricesClose(results.local_jacobians.at(1),
+                      j1,
+                      std::numeric_limits<double>::epsilon());
+  ExpectMatricesClose(
+      results.local_numeric_jacobians.at(0), j0 * global_J_local, kTolerance);
+  ExpectMatricesClose(results.local_numeric_jacobians.at(1), j1, kTolerance);
+  ExpectMatricesClose(
+      results.jacobians.at(0), j0, std::numeric_limits<double>::epsilon());
+  ExpectMatricesClose(
+      results.jacobians.at(1), j1, std::numeric_limits<double>::epsilon());
+  ExpectMatricesClose(results.numeric_jacobians.at(0), j0, kTolerance);
+  ExpectMatricesClose(results.numeric_jacobians.at(1), j1, kTolerance);
+  EXPECT_GE(results.maximum_relative_error, 0.0);
+  EXPECT_TRUE(results.error_log.empty());
+
+  // Test interaction with the 'check_gradients' option in Solver.
+  param0_solver = param0;
+  param1_solver = param1;
+  solver.Solve(solver_options, &problem, &summary);
+  EXPECT_EQ(CONVERGENCE, summary.termination_type);
+  EXPECT_LE(summary.final_cost, 1e-12);
+
+  // Second test case: Mess up reported derivatives with respect to 3rd
+  // component of 1st parameter. Check should fail.
+  Eigen::Matrix<double, 3, 3, Eigen::RowMajor> j0_offset;
+  j0_offset.setZero();
+  j0_offset.col(2).setConstant(0.001);
+  cost_function.SetJacobianOffset(0, j0_offset);
+  EXPECT_FALSE(gradient_checker.Probe(parameters.data(), kTolerance, NULL));
+  EXPECT_FALSE(gradient_checker.Probe(parameters.data(), kTolerance, &results))
+      << results.error_log;
+
+  // Check that results contain correct data.
+  ASSERT_EQ(results.return_value, true);
+  ExpectMatricesClose(
+      results.residuals, residual, std::numeric_limits<double>::epsilon());
+  CheckDimensions(results, parameter_sizes, local_parameter_sizes, 3);
+  ASSERT_EQ(results.local_jacobians.size(), 2);
+  ASSERT_EQ(results.local_numeric_jacobians.size(), 2);
+  ExpectMatricesClose(results.local_jacobians.at(0),
+                      (j0 + j0_offset) * global_J_local,
+                      kTolerance);
+  ExpectMatricesClose(results.local_jacobians.at(1),
+                      j1,
+                      std::numeric_limits<double>::epsilon());
+  ExpectMatricesClose(
+      results.local_numeric_jacobians.at(0), j0 * global_J_local, kTolerance);
+  ExpectMatricesClose(results.local_numeric_jacobians.at(1), j1, kTolerance);
+  ExpectMatricesClose(results.jacobians.at(0), j0 + j0_offset, kTolerance);
+  ExpectMatricesClose(
+      results.jacobians.at(1), j1, std::numeric_limits<double>::epsilon());
+  ExpectMatricesClose(results.numeric_jacobians.at(0), j0, kTolerance);
+  ExpectMatricesClose(results.numeric_jacobians.at(1), j1, kTolerance);
+  EXPECT_GT(results.maximum_relative_error, 0.0);
+  EXPECT_FALSE(results.error_log.empty());
+
+  // Test interaction with the 'check_gradients' option in Solver.
+  param0_solver = param0;
+  param1_solver = param1;
+  solver.Solve(solver_options, &problem, &summary);
+  EXPECT_EQ(FAILURE, summary.termination_type);
+
+  // Now, zero out the local parameterization Jacobian of the 1st parameter
+  // with respect to the 3rd component. This makes the combination of
+  // cost function and local parameterization return correct values again.
+  parameterization.global_J_local.row(2).setZero();
+
+  // Verify that the gradient checker does not treat this as an error.
+  EXPECT_TRUE(gradient_checker.Probe(parameters.data(), kTolerance, &results))
+      << results.error_log;
+
+  // Check that results contain correct data.
+  ASSERT_EQ(results.return_value, true);
+  ExpectMatricesClose(
+      results.residuals, residual, std::numeric_limits<double>::epsilon());
+  CheckDimensions(results, parameter_sizes, local_parameter_sizes, 3);
+  ASSERT_EQ(results.local_jacobians.size(), 2);
+  ASSERT_EQ(results.local_numeric_jacobians.size(), 2);
+  ExpectMatricesClose(results.local_jacobians.at(0),
+                      (j0 + j0_offset) * parameterization.global_J_local,
+                      kTolerance);
+  ExpectMatricesClose(results.local_jacobians.at(1),
+                      j1,
+                      std::numeric_limits<double>::epsilon());
+  ExpectMatricesClose(results.local_numeric_jacobians.at(0),
+                      j0 * parameterization.global_J_local,
+                      kTolerance);
+  ExpectMatricesClose(results.local_numeric_jacobians.at(1), j1, kTolerance);
+  ExpectMatricesClose(results.jacobians.at(0), j0 + j0_offset, kTolerance);
+  ExpectMatricesClose(
+      results.jacobians.at(1), j1, std::numeric_limits<double>::epsilon());
+  ExpectMatricesClose(results.numeric_jacobians.at(0), j0, kTolerance);
+  ExpectMatricesClose(results.numeric_jacobians.at(1), j1, kTolerance);
+  EXPECT_GE(results.maximum_relative_error, 0.0);
+  EXPECT_TRUE(results.error_log.empty());
+
+  // Test interaction with the 'check_gradients' option in Solver.
+  param0_solver = param0;
+  param1_solver = param1;
+  solver.Solve(solver_options, &problem, &summary);
+  EXPECT_EQ(CONVERGENCE, summary.termination_type);
+  EXPECT_LE(summary.final_cost, 1e-12);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/gradient_checking_cost_function.cc b/internal/ceres/gradient_checking_cost_function.cc
new file mode 100644
index 0000000..1afbec3
--- /dev/null
+++ b/internal/ceres/gradient_checking_cost_function.cc
@@ -0,0 +1,289 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: keir@google.com (Keir Mierle),
+//          dgossow@google.com (David Gossow)
+
+#include "ceres/gradient_checking_cost_function.h"
+
+#include <algorithm>
+#include <cmath>
+#include <cstdint>
+#include <numeric>
+#include <string>
+#include <vector>
+
+#include "ceres/gradient_checker.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/parameter_block.h"
+#include "ceres/problem.h"
+#include "ceres/problem_impl.h"
+#include "ceres/program.h"
+#include "ceres/residual_block.h"
+#include "ceres/dynamic_numeric_diff_cost_function.h"
+#include "ceres/stringprintf.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+using std::abs;
+using std::max;
+using std::string;
+using std::vector;
+
+namespace {
+
+class GradientCheckingCostFunction : public CostFunction {
+ public:
+  GradientCheckingCostFunction(
+      const CostFunction* function,
+      const std::vector<const LocalParameterization*>* local_parameterizations,
+      const NumericDiffOptions& options,
+      double relative_precision,
+      const string& extra_info,
+      GradientCheckingIterationCallback* callback)
+      : function_(function),
+        gradient_checker_(function, local_parameterizations, options),
+        relative_precision_(relative_precision),
+        extra_info_(extra_info),
+        callback_(callback) {
+    CHECK(callback_ != nullptr);
+    const vector<int32_t>& parameter_block_sizes =
+        function->parameter_block_sizes();
+    *mutable_parameter_block_sizes() = parameter_block_sizes;
+    set_num_residuals(function->num_residuals());
+  }
+
+  virtual ~GradientCheckingCostFunction() { }
+
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** jacobians) const {
+    if (!jacobians) {
+      // Nothing to check in this case; just forward.
+      return function_->Evaluate(parameters, residuals, NULL);
+    }
+
+    GradientChecker::ProbeResults results;
+    bool okay = gradient_checker_.Probe(parameters,
+                                        relative_precision_,
+                                        &results);
+
+    // If the cost function returned false, there's nothing we can say about
+    // the gradients.
+    if (results.return_value == false) {
+      return false;
+    }
+
+    // Copy the residuals.
+    const int num_residuals = function_->num_residuals();
+    MatrixRef(residuals, num_residuals, 1) = results.residuals;
+
+    // Copy the original jacobian blocks into the jacobians array.
+    const vector<int32_t>& block_sizes = function_->parameter_block_sizes();
+    for (int k = 0; k < block_sizes.size(); k++) {
+      if (jacobians[k] != NULL) {
+        MatrixRef(jacobians[k],
+                  results.jacobians[k].rows(),
+                  results.jacobians[k].cols()) = results.jacobians[k];
+      }
+    }
+
+    if (!okay) {
+      std::string error_log = "Gradient Error detected!\nExtra info for "
+          "this residual: " + extra_info_ + "\n" + results.error_log;
+      callback_->SetGradientErrorDetected(error_log);
+    }
+    return true;
+  }
+
+ private:
+  const CostFunction* function_;
+  GradientChecker gradient_checker_;
+  double relative_precision_;
+  string extra_info_;
+  GradientCheckingIterationCallback* callback_;
+};
+
+}  // namespace
+
+GradientCheckingIterationCallback::GradientCheckingIterationCallback()
+    : gradient_error_detected_(false) {
+}
+
+CallbackReturnType GradientCheckingIterationCallback::operator()(
+    const IterationSummary& summary) {
+  if (gradient_error_detected_) {
+    LOG(ERROR) << "Gradient error detected. Terminating solver.";
+    return SOLVER_ABORT;
+  }
+  return SOLVER_CONTINUE;
+}
+
+void GradientCheckingIterationCallback::SetGradientErrorDetected(
+    std::string& error_log) {
+  std::lock_guard<std::mutex> l(mutex_);
+  gradient_error_detected_ = true;
+  error_log_ += "\n" + error_log;
+}
+
+CostFunction* CreateGradientCheckingCostFunction(
+    const CostFunction* cost_function,
+    const std::vector<const LocalParameterization*>* local_parameterizations,
+    double relative_step_size,
+    double relative_precision,
+    const std::string& extra_info,
+    GradientCheckingIterationCallback* callback) {
+  NumericDiffOptions numeric_diff_options;
+  numeric_diff_options.relative_step_size = relative_step_size;
+
+  return new GradientCheckingCostFunction(cost_function,
+                                          local_parameterizations,
+                                          numeric_diff_options,
+                                          relative_precision, extra_info,
+                                          callback);
+}
+
+ProblemImpl* CreateGradientCheckingProblemImpl(
+    ProblemImpl* problem_impl,
+    double relative_step_size,
+    double relative_precision,
+    GradientCheckingIterationCallback* callback) {
+  CHECK(callback != nullptr);
+  // We create new CostFunctions by wrapping the original CostFunction
+  // in a gradient checking CostFunction, so it is okay for the
+  // ProblemImpl to take ownership of them and destroy them. The
+  // LossFunctions and LocalParameterizations are reused; since they
+  // are owned by problem_impl, gradient_checking_problem_impl should
+  // not take ownership of them.
+  Problem::Options gradient_checking_problem_options;
+  gradient_checking_problem_options.cost_function_ownership = TAKE_OWNERSHIP;
+  gradient_checking_problem_options.loss_function_ownership =
+      DO_NOT_TAKE_OWNERSHIP;
+  gradient_checking_problem_options.local_parameterization_ownership =
+      DO_NOT_TAKE_OWNERSHIP;
+  gradient_checking_problem_options.context = problem_impl->context();
+
+  NumericDiffOptions numeric_diff_options;
+  numeric_diff_options.relative_step_size = relative_step_size;
+
+  ProblemImpl* gradient_checking_problem_impl = new ProblemImpl(
+      gradient_checking_problem_options);
+
+  Program* program = problem_impl->mutable_program();
+
+  // For every ParameterBlock in problem_impl, create a new parameter
+  // block with the same local parameterization and constancy.
+  const vector<ParameterBlock*>& parameter_blocks = program->parameter_blocks();
+  for (int i = 0; i < parameter_blocks.size(); ++i) {
+    ParameterBlock* parameter_block = parameter_blocks[i];
+    gradient_checking_problem_impl->AddParameterBlock(
+        parameter_block->mutable_user_state(),
+        parameter_block->Size(),
+        parameter_block->mutable_local_parameterization());
+
+    if (parameter_block->IsConstant()) {
+      gradient_checking_problem_impl->SetParameterBlockConstant(
+          parameter_block->mutable_user_state());
+    }
+
+    for (int j = 0; j < parameter_block->Size(); ++j) {
+      gradient_checking_problem_impl->SetParameterUpperBound(
+          parameter_block->mutable_user_state(),
+          j,
+          parameter_block->UpperBound(j));
+      gradient_checking_problem_impl->SetParameterLowerBound(
+          parameter_block->mutable_user_state(),
+          j,
+          parameter_block->LowerBound(j));
+    }
+  }
+
+  // For every ResidualBlock in problem_impl, create a new
+  // ResidualBlock by wrapping its CostFunction inside a
+  // GradientCheckingCostFunction.
+  const vector<ResidualBlock*>& residual_blocks = program->residual_blocks();
+  for (int i = 0; i < residual_blocks.size(); ++i) {
+    ResidualBlock* residual_block = residual_blocks[i];
+
+    // Build a human readable string which identifies the
+    // ResidualBlock. This is used by the GradientCheckingCostFunction
+    // when logging debugging information.
+    string extra_info = StringPrintf(
+        "Residual block id %d; depends on parameters [", i);
+    vector<double*> parameter_blocks;
+    vector<const LocalParameterization*> local_parameterizations;
+    parameter_blocks.reserve(residual_block->NumParameterBlocks());
+    local_parameterizations.reserve(residual_block->NumParameterBlocks());
+    for (int j = 0; j < residual_block->NumParameterBlocks(); ++j) {
+      ParameterBlock* parameter_block = residual_block->parameter_blocks()[j];
+      parameter_blocks.push_back(parameter_block->mutable_user_state());
+      StringAppendF(&extra_info, "%p", parameter_block->mutable_user_state());
+      extra_info += (j < residual_block->NumParameterBlocks() - 1) ? ", " : "]";
+      local_parameterizations.push_back(problem_impl->GetParameterization(
+          parameter_block->mutable_user_state()));
+    }
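+
+    // At this point extra_info reads, for example (pointer values are
+    // illustrative only):
+    //   "Residual block id 3; depends on parameters [0x7ffc10, 0x7ffc40]"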
+
+    // Wrap the original CostFunction in a GradientCheckingCostFunction.
+    CostFunction* gradient_checking_cost_function =
+        new GradientCheckingCostFunction(residual_block->cost_function(),
+                                         &local_parameterizations,
+                                         numeric_diff_options,
+                                         relative_precision,
+                                         extra_info,
+                                         callback);
+
+    // The const_cast is necessary because
+    // ProblemImpl::AddResidualBlock can potentially take ownership of
+    // the LossFunction, but in this case we are guaranteed that this
+    // will not be the case, so this const_cast is harmless.
+    gradient_checking_problem_impl->AddResidualBlock(
+        gradient_checking_cost_function,
+        const_cast<LossFunction*>(residual_block->loss_function()),
+        parameter_blocks.data(),
+        static_cast<int>(parameter_blocks.size()));
+  }
+
+  // Normally, when a problem is given to the solver, we guarantee
+  // that the state pointers for each parameter block point to the
+  // user provided data. Since we are creating this new problem from a
+  // problem given to us at an arbitrary stage of the solve, we cannot
+  // depend on this being the case, so we explicitly call
+  // SetParameterBlockStatePtrsToUserStatePtrs to ensure that this is
+  // the case.
+  gradient_checking_problem_impl
+      ->mutable_program()
+      ->SetParameterBlockStatePtrsToUserStatePtrs();
+
+  return gradient_checking_problem_impl;
+}
+
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/gradient_checking_cost_function.h b/internal/ceres/gradient_checking_cost_function.h
new file mode 100644
index 0000000..b2cd26e
--- /dev/null
+++ b/internal/ceres/gradient_checking_cost_function.h
@@ -0,0 +1,111 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: keir@google.com (Keir Mierle),
+//          dgossow@google.com (David Gossow)
+
+#ifndef CERES_INTERNAL_GRADIENT_CHECKING_COST_FUNCTION_H_
+#define CERES_INTERNAL_GRADIENT_CHECKING_COST_FUNCTION_H_
+
+#include <mutex>
+#include <string>
+
+#include "ceres/cost_function.h"
+#include "ceres/iteration_callback.h"
+#include "ceres/local_parameterization.h"
+
+namespace ceres {
+namespace internal {
+
+class ProblemImpl;
+
+// Callback that collects information about gradient checking errors, and
+// will abort the solve as soon as an error occurs.
+class GradientCheckingIterationCallback : public IterationCallback {
+ public:
+  GradientCheckingIterationCallback();
+
+  // Will return SOLVER_CONTINUE until a gradient error has been detected,
+  // then return SOLVER_ABORT.
+  virtual CallbackReturnType operator()(const IterationSummary& summary);
+
+  // Notify this callback that a gradient error has occurred (thread safe).
+  void SetGradientErrorDetected(std::string& error_log);
+
+  // Retrieve error status (not thread safe).
+  bool gradient_error_detected() const { return gradient_error_detected_; }
+  const std::string& error_log() const { return error_log_; }
+ private:
+  bool gradient_error_detected_;
+  std::string error_log_;
+  std::mutex mutex_;
+};
+
+// Creates a CostFunction that checks the Jacobians that cost_function computes
+// against finite differences. This API is only intended for unit tests that
+// check the functionality of the GradientCheckingCostFunction implementation
+// directly.
+CostFunction* CreateGradientCheckingCostFunction(
+    const CostFunction* cost_function,
+    const std::vector<const LocalParameterization*>* local_parameterizations,
+    double relative_step_size,
+    double relative_precision,
+    const std::string& extra_info,
+    GradientCheckingIterationCallback* callback);
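+//
+// Example use of CreateGradientCheckingCostFunction: a minimal sketch, where
+// my_cost_function, parameters, residuals and jacobians are illustrative
+// names, not part of this API:
+//
+//   GradientCheckingIterationCallback callback;
+//   std::unique_ptr<CostFunction> checked_cost_function(
+//       CreateGradientCheckingCostFunction(&my_cost_function,
+//                                          NULL,  // local parameterizations
+//                                          1e-6,  // relative_step_size
+//                                          1e-4,  // relative_precision
+//                                          "my residual", &callback));
+//   checked_cost_function->Evaluate(parameters, residuals, jacobians);
+//   if (callback.gradient_error_detected()) {
+//     LOG(ERROR) << callback.error_log();
+//   }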
+
+// Creates a new ProblemImpl object from the input problem_impl, where all
+// cost functions are wrapped so that each time their Evaluate method is called,
+// an additional check is performed that compares the Jacobians computed by
+// the original cost function with alternative Jacobians computed using
+// numerical differentiation. If local parameterizations are given for any
+// parameters, the Jacobians will be compared in the local space instead of the
+// ambient space. For details on the gradient checking procedure, see the
+// documentation of the GradientChecker class. If an error is detected in any
+// iteration, the respective cost function will notify the
+// GradientCheckingIterationCallback.
+//
+// The caller owns the returned ProblemImpl object.
+//
+// Note: This is quite inefficient and is intended only for debugging.
+//
+// relative_step_size and relative_precision are parameters that control
+// the numeric differentiation and the relative tolerance between the
+// Jacobians computed by the CostFunctions in problem_impl and the
+// Jacobians obtained by numerically differentiating them. See the
+// documentation of 'numeric_derivative_relative_step_size' in solver.h for a
+// more detailed explanation.
+ProblemImpl* CreateGradientCheckingProblemImpl(
+    ProblemImpl* problem_impl,
+    double relative_step_size,
+    double relative_precision,
+    GradientCheckingIterationCallback* callback);
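+//
+// A minimal sketch of the intended use of CreateGradientCheckingProblemImpl,
+// mirroring roughly how the solver uses it when gradient checking is enabled
+// (checked_problem is an illustrative name):
+//
+//   GradientCheckingIterationCallback callback;
+//   std::unique_ptr<ProblemImpl> checked_problem(
+//       CreateGradientCheckingProblemImpl(problem_impl,
+//                                         1e-6,  // relative_step_size
+//                                         1e-4,  // relative_precision
+//                                         &callback));
+//   // Add &callback to the iteration callbacks used for the solve so the
+//   // solve aborts as soon as a bad Jacobian is reported, then solve
+//   // checked_problem instead of the original problem.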
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_GRADIENT_CHECKING_COST_FUNCTION_H_
diff --git a/internal/ceres/gradient_checking_cost_function_test.cc b/internal/ceres/gradient_checking_cost_function_test.cc
new file mode 100644
index 0000000..f08bcd0
--- /dev/null
+++ b/internal/ceres/gradient_checking_cost_function_test.cc
@@ -0,0 +1,449 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include "ceres/gradient_checking_cost_function.h"
+
+#include <cmath>
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+#include "ceres/cost_function.h"
+#include "ceres/local_parameterization.h"
+#include "ceres/loss_function.h"
+#include "ceres/parameter_block.h"
+#include "ceres/problem_impl.h"
+#include "ceres/program.h"
+#include "ceres/random.h"
+#include "ceres/residual_block.h"
+#include "ceres/sized_cost_function.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+using std::vector;
+using testing::AllOf;
+using testing::AnyNumber;
+using testing::HasSubstr;
+using testing::_;
+
+// Pick a (non-quadratic) function whose derivatives are easy:
+//
+//    f = exp(- a' x).
+//   df = - f a.
+//
+// where 'a' is a vector of the same size as 'x'. In the block
+// version, they are both block vectors, of course.
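+//
+// As a quick sanity check of the formula above (all symbols come from this
+// comment): writing s = a' x, the chain rule gives
+//   d/dx_j exp(-s) = exp(-s) * d(-s)/dx_j = -f * a_j,
+// which is exactly the Jacobian that Evaluate() fills in below, before the
+// deliberate corruption controlled by the bad_block/bad_variable template
+// parameters.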
+template<int bad_block = 1, int bad_variable = 2>
+class TestTerm : public CostFunction {
+ public:
+  // The constructor of this function needs to know the number
+  // of blocks desired, and the size of each block.
+  TestTerm(int arity, int const *dim) : arity_(arity) {
+    // Make 'arity' random vectors.
+    a_.resize(arity_);
+    for (int j = 0; j < arity_; ++j) {
+      a_[j].resize(dim[j]);
+      for (int u = 0; u < dim[j]; ++u) {
+        a_[j][u] = 2.0 * RandDouble() - 1.0;
+      }
+    }
+
+    for (int i = 0; i < arity_; i++) {
+      mutable_parameter_block_sizes()->push_back(dim[i]);
+    }
+    set_num_residuals(1);
+  }
+
+  bool Evaluate(double const* const* parameters,
+                double* residuals,
+                double** jacobians) const {
+    // Compute a . x.
+    double ax = 0;
+    for (int j = 0; j < arity_; ++j) {
+      for (int u = 0; u < parameter_block_sizes()[j]; ++u) {
+        ax += a_[j][u] * parameters[j][u];
+      }
+    }
+
+    // This is the cost, but also appears as a factor
+    // in the derivatives.
+    double f = *residuals = exp(-ax);
+
+    // Accumulate 1st order derivatives.
+    if (jacobians) {
+      for (int j = 0; j < arity_; ++j) {
+        if (jacobians[j]) {
+          for (int u = 0; u < parameter_block_sizes()[j]; ++u) {
+            // See comments before class.
+            jacobians[j][u] = - f * a_[j][u];
+
+            if (bad_block == j && bad_variable == u) {
+              // Whoopsiedoopsie! Deliberately introduce a faulty jacobian entry
+              // like what happens when users make an error in their jacobian
+              // computations. This should get detected.
+              LOG(INFO) << "Poisoning jacobian for parameter block " << j
+                        << ", row 0, column " << u;
+              jacobians[j][u] += 500;
+            }
+          }
+        }
+      }
+    }
+
+    return true;
+  }
+
+ private:
+  int arity_;
+  vector<vector<double>> a_;
+};
+
+TEST(GradientCheckingCostFunction, ResidualsAndJacobiansArePreservedTest) {
+  srand(5);
+
+  // Test with 3 blocks of size 2, 3 and 4.
+  int const arity = 3;
+  int const dim[arity] = { 2, 3, 4 };
+
+  // Make a random set of blocks.
+  vector<double*> parameters(arity);
+  for (int j = 0; j < arity; ++j) {
+    parameters[j] = new double[dim[j]];
+    for (int u = 0; u < dim[j]; ++u) {
+      parameters[j][u] = 2.0 * RandDouble() - 1.0;
+    }
+  }
+
+  double original_residual;
+  double residual;
+  vector<double*> original_jacobians(arity);
+  vector<double*> jacobians(arity);
+
+  for (int j = 0; j < arity; ++j) {
+    // Since residual is one dimensional the jacobians have the same
+    // size as the parameter blocks.
+    jacobians[j] = new double[dim[j]];
+    original_jacobians[j] = new double[dim[j]];
+  }
+
+  const double kRelativeStepSize = 1e-6;
+  const double kRelativePrecision = 1e-4;
+
+  TestTerm<-1, -1> term(arity, dim);
+  GradientCheckingIterationCallback callback;
+  std::unique_ptr<CostFunction> gradient_checking_cost_function(
+      CreateGradientCheckingCostFunction(&term, NULL,
+                                         kRelativeStepSize,
+                                         kRelativePrecision,
+                                         "Ignored.", &callback));
+  term.Evaluate(&parameters[0],
+                &original_residual,
+                &original_jacobians[0]);
+
+  gradient_checking_cost_function->Evaluate(&parameters[0],
+                                            &residual,
+                                            &jacobians[0]);
+  EXPECT_EQ(original_residual, residual);
+
+  for (int j = 0; j < arity; j++) {
+    for (int k = 0; k < dim[j]; ++k) {
+      EXPECT_EQ(original_jacobians[j][k], jacobians[j][k]);
+    }
+
+    delete[] parameters[j];
+    delete[] jacobians[j];
+    delete[] original_jacobians[j];
+  }
+}
+
+TEST(GradientCheckingCostFunction, SmokeTest) {
+  srand(5);
+
+  // Test with 3 blocks of size 2, 3 and 4.
+  int const arity = 3;
+  int const dim[arity] = { 2, 3, 4 };
+
+  // Make a random set of blocks.
+  vector<double*> parameters(arity);
+  for (int j = 0; j < arity; ++j) {
+    parameters[j] = new double[dim[j]];
+    for (int u = 0; u < dim[j]; ++u) {
+      parameters[j][u] = 2.0 * RandDouble() - 1.0;
+    }
+  }
+
+  double residual;
+  vector<double*> jacobians(arity);
+  for (int j = 0; j < arity; ++j) {
+    // Since residual is one dimensional the jacobians have the same size as the
+    // parameter blocks.
+    jacobians[j] = new double[dim[j]];
+  }
+
+  const double kRelativeStepSize = 1e-6;
+  const double kRelativePrecision = 1e-4;
+
+  // Should have one term that's bad, causing everything to get dumped.
+  LOG(INFO) << "Bad gradient";
+  {
+    TestTerm<1, 2> term(arity, dim);
+    GradientCheckingIterationCallback callback;
+    std::unique_ptr<CostFunction> gradient_checking_cost_function(
+        CreateGradientCheckingCostFunction(&term, NULL,
+                                           kRelativeStepSize,
+                                           kRelativePrecision,
+                                           "Fuzzy banana", &callback));
+    EXPECT_TRUE(
+        gradient_checking_cost_function->Evaluate(&parameters[0], &residual,
+                                                  &jacobians[0]));
+    EXPECT_TRUE(callback.gradient_error_detected());
+    EXPECT_TRUE(callback.error_log().find("Fuzzy banana") != std::string::npos);
+    EXPECT_TRUE(callback.error_log().find("(1,0,2) Relative error worse than")
+                != std::string::npos);
+  }
+
+  // The gradient is correct, so no errors are reported.
+  LOG(INFO) << "Good gradient";
+  {
+    TestTerm<-1, -1> term(arity, dim);
+    GradientCheckingIterationCallback callback;
+    std::unique_ptr<CostFunction> gradient_checking_cost_function(
+        CreateGradientCheckingCostFunction(&term, NULL,
+                                           kRelativeStepSize,
+                                           kRelativePrecision,
+                                           "Fuzzy banana", &callback));
+    EXPECT_TRUE(
+        gradient_checking_cost_function->Evaluate(&parameters[0], &residual,
+                                                  &jacobians[0]));
+    EXPECT_FALSE(callback.gradient_error_detected());
+  }
+
+  for (int j = 0; j < arity; j++) {
+    delete[] parameters[j];
+    delete[] jacobians[j];
+  }
+}
+
+// The following three classes are for the purposes of defining
+// function signatures. They have dummy Evaluate functions.
+
+// Trivial cost function that accepts a single argument.
+class UnaryCostFunction : public CostFunction {
+ public:
+  UnaryCostFunction(int num_residuals, int32_t parameter_block_size) {
+    set_num_residuals(num_residuals);
+    mutable_parameter_block_sizes()->push_back(parameter_block_size);
+  }
+  virtual ~UnaryCostFunction() {}
+
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** jacobians) const {
+    for (int i = 0; i < num_residuals(); ++i) {
+      residuals[i] = 1;
+    }
+    return true;
+  }
+};
+
+// Trivial cost function that accepts two arguments.
+class BinaryCostFunction: public CostFunction {
+ public:
+  BinaryCostFunction(int num_residuals,
+                     int32_t parameter_block1_size,
+                     int32_t parameter_block2_size) {
+    set_num_residuals(num_residuals);
+    mutable_parameter_block_sizes()->push_back(parameter_block1_size);
+    mutable_parameter_block_sizes()->push_back(parameter_block2_size);
+  }
+
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** jacobians) const {
+    for (int i = 0; i < num_residuals(); ++i) {
+      residuals[i] = 2;
+    }
+    return true;
+  }
+};
+
+// Trivial cost function that accepts three arguments.
+class TernaryCostFunction: public CostFunction {
+ public:
+  TernaryCostFunction(int num_residuals,
+                      int32_t parameter_block1_size,
+                      int32_t parameter_block2_size,
+                      int32_t parameter_block3_size) {
+    set_num_residuals(num_residuals);
+    mutable_parameter_block_sizes()->push_back(parameter_block1_size);
+    mutable_parameter_block_sizes()->push_back(parameter_block2_size);
+    mutable_parameter_block_sizes()->push_back(parameter_block3_size);
+  }
+
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** jacobians) const {
+    for (int i = 0; i < num_residuals(); ++i) {
+      residuals[i] = 3;
+    }
+    return true;
+  }
+};
+
+// Verify that the two ParameterBlocks are formed from the same user
+// array and have the same LocalParameterization object.
+void ParameterBlocksAreEquivalent(const ParameterBlock* left,
+                                  const ParameterBlock* right) {
+  CHECK(left != nullptr);
+  CHECK(right != nullptr);
+  EXPECT_EQ(left->user_state(), right->user_state());
+  EXPECT_EQ(left->Size(), right->Size());
+  EXPECT_EQ(left->LocalSize(), right->LocalSize());
+  EXPECT_EQ(left->local_parameterization(), right->local_parameterization());
+  EXPECT_EQ(left->IsConstant(), right->IsConstant());
+}
+
+TEST(GradientCheckingProblemImpl, ProblemDimensionsMatch) {
+  // Parameter blocks with arbitrarily chosen initial values.
+  double x[] = {1.0, 2.0, 3.0};
+  double y[] = {4.0, 5.0, 6.0, 7.0};
+  double z[] = {8.0, 9.0, 10.0, 11.0, 12.0};
+  double w[] = {13.0, 14.0, 15.0, 16.0};
+
+  ProblemImpl problem_impl;
+  problem_impl.AddParameterBlock(x, 3);
+  problem_impl.AddParameterBlock(y, 4);
+  problem_impl.SetParameterBlockConstant(y);
+  problem_impl.AddParameterBlock(z, 5);
+  problem_impl.AddParameterBlock(w, 4, new QuaternionParameterization);
+  problem_impl.AddResidualBlock(new UnaryCostFunction(2, 3), NULL, x);
+  problem_impl.AddResidualBlock(new BinaryCostFunction(6, 5, 4),
+                                NULL, z, y);
+  problem_impl.AddResidualBlock(new BinaryCostFunction(3, 3, 5),
+                                new TrivialLoss, x, z);
+  problem_impl.AddResidualBlock(new BinaryCostFunction(7, 5, 3),
+                                NULL, z, x);
+  problem_impl.AddResidualBlock(new TernaryCostFunction(1, 5, 3, 4),
+                                NULL, z, x, y);
+
+  GradientCheckingIterationCallback callback;
+  std::unique_ptr<ProblemImpl> gradient_checking_problem_impl(
+      CreateGradientCheckingProblemImpl(&problem_impl, 1.0, 1.0, &callback));
+
+  // The dimensions of the two problems match.
+  EXPECT_EQ(problem_impl.NumParameterBlocks(),
+            gradient_checking_problem_impl->NumParameterBlocks());
+  EXPECT_EQ(problem_impl.NumResidualBlocks(),
+            gradient_checking_problem_impl->NumResidualBlocks());
+
+  EXPECT_EQ(problem_impl.NumParameters(),
+            gradient_checking_problem_impl->NumParameters());
+  EXPECT_EQ(problem_impl.NumResiduals(),
+            gradient_checking_problem_impl->NumResiduals());
+
+  const Program& program = problem_impl.program();
+  const Program& gradient_checking_program =
+      gradient_checking_problem_impl->program();
+
+  // Since we added the ParameterBlocks and ResidualBlocks explicitly,
+  // they should be in the same order in the two programs. It is
+  // possible that this may change due to implementation changes to
+  // Program, but this is not expected to be the case, and writing code
+  // to anticipate that possibility is not worth the extra complexity
+  // in this test.
+  for (int i = 0; i < program.parameter_blocks().size(); ++i) {
+    ParameterBlocksAreEquivalent(
+        program.parameter_blocks()[i],
+        gradient_checking_program.parameter_blocks()[i]);
+  }
+
+  for (int i = 0; i < program.residual_blocks().size(); ++i) {
+    // Compare the sizes of the two ResidualBlocks.
+    const ResidualBlock* original_residual_block =
+        program.residual_blocks()[i];
+    const ResidualBlock* new_residual_block =
+        gradient_checking_program.residual_blocks()[i];
+    EXPECT_EQ(original_residual_block->NumParameterBlocks(),
+              new_residual_block->NumParameterBlocks());
+    EXPECT_EQ(original_residual_block->NumResiduals(),
+              new_residual_block->NumResiduals());
+    EXPECT_EQ(original_residual_block->NumScratchDoublesForEvaluate(),
+              new_residual_block->NumScratchDoublesForEvaluate());
+
+    // Verify that the ParameterBlocks for the two residuals are equivalent.
+    for (int j = 0; j < original_residual_block->NumParameterBlocks(); ++j) {
+      ParameterBlocksAreEquivalent(
+          original_residual_block->parameter_blocks()[j],
+          new_residual_block->parameter_blocks()[j]);
+    }
+  }
+}
+
+
+TEST(GradientCheckingProblemImpl, ConstrainedProblemBoundsArePropagated) {
+  // Parameter blocks with arbitrarily chosen initial values.
+  double x[] = {1.0, 2.0, 3.0};
+  ProblemImpl problem_impl;
+  problem_impl.AddParameterBlock(x, 3);
+  problem_impl.AddResidualBlock(new UnaryCostFunction(2, 3), NULL, x);
+  problem_impl.SetParameterLowerBound(x, 0, 0.9);
+  problem_impl.SetParameterUpperBound(x, 1, 2.5);
+
+  GradientCheckingIterationCallback callback;
+  std::unique_ptr<ProblemImpl> gradient_checking_problem_impl(
+      CreateGradientCheckingProblemImpl(&problem_impl, 1.0, 1.0, &callback));
+
+  // The dimensions of the two problems match.
+  EXPECT_EQ(problem_impl.NumParameterBlocks(),
+            gradient_checking_problem_impl->NumParameterBlocks());
+  EXPECT_EQ(problem_impl.NumResidualBlocks(),
+            gradient_checking_problem_impl->NumResidualBlocks());
+
+  EXPECT_EQ(problem_impl.NumParameters(),
+            gradient_checking_problem_impl->NumParameters());
+  EXPECT_EQ(problem_impl.NumResiduals(),
+            gradient_checking_problem_impl->NumResiduals());
+
+  for (int i = 0; i < 3; ++i) {
+    EXPECT_EQ(problem_impl.GetParameterLowerBound(x, i),
+              gradient_checking_problem_impl->GetParameterLowerBound(x, i));
+    EXPECT_EQ(problem_impl.GetParameterUpperBound(x, i),
+              gradient_checking_problem_impl->GetParameterUpperBound(x, i));
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/gradient_problem.cc b/internal/ceres/gradient_problem.cc
new file mode 100644
index 0000000..4ebd3e6
--- /dev/null
+++ b/internal/ceres/gradient_problem.cc
@@ -0,0 +1,81 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/gradient_problem.h"
+#include "ceres/local_parameterization.h"
+#include "glog/logging.h"
+
+namespace ceres {
+
+GradientProblem::GradientProblem(FirstOrderFunction* function)
+    : function_(function),
+      parameterization_(
+          new IdentityParameterization(function_->NumParameters())),
+      scratch_(new double[function_->NumParameters()]) {
+}
+
+GradientProblem::GradientProblem(FirstOrderFunction* function,
+                                 LocalParameterization* parameterization)
+      : function_(function),
+        parameterization_(parameterization),
+        scratch_(new double[function_->NumParameters()]) {
+  CHECK_EQ(function_->NumParameters(), parameterization_->GlobalSize());
+}
+
+int GradientProblem::NumParameters() const {
+  return function_->NumParameters();
+}
+
+int GradientProblem::NumLocalParameters() const {
+  return parameterization_->LocalSize();
+}
+
+
+bool GradientProblem::Evaluate(const double* parameters,
+                               double* cost,
+                               double* gradient) const {
+  if (gradient == NULL) {
+    return function_->Evaluate(parameters, cost, NULL);
+  }
+
+  return (function_->Evaluate(parameters, cost, scratch_.get()) &&
+          parameterization_->MultiplyByJacobian(parameters,
+                                                1,
+                                                scratch_.get(),
+                                                gradient));
+}
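+
+// Note: with the default IdentityParameterization the MultiplyByJacobian call
+// in Evaluate() above reduces to a copy of the ambient-space gradient; a
+// non-trivial LocalParameterization maps it into the local (tangent) space,
+// i.e. gradient_local = gradient_ambient * (d Plus(x, delta) / d delta).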
+
+bool GradientProblem::Plus(const double* x,
+                           const double* delta,
+                           double* x_plus_delta) const {
+  return parameterization_->Plus(x, delta, x_plus_delta);
+}
+
+}  // namespace ceres
diff --git a/internal/ceres/gradient_problem_evaluator.h b/internal/ceres/gradient_problem_evaluator.h
new file mode 100644
index 0000000..5458631
--- /dev/null
+++ b/internal/ceres/gradient_problem_evaluator.h
@@ -0,0 +1,101 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_GRADIENT_PROBLEM_EVALUATOR_H_
+#define CERES_INTERNAL_GRADIENT_PROBLEM_EVALUATOR_H_
+
+#include <map>
+#include <string>
+
+#include "ceres/evaluator.h"
+#include "ceres/execution_summary.h"
+#include "ceres/gradient_problem.h"
+#include "ceres/internal/port.h"
+#include "ceres/wall_time.h"
+
+namespace ceres {
+namespace internal {
+
+class GradientProblemEvaluator : public Evaluator {
+ public:
+  explicit GradientProblemEvaluator(const GradientProblem& problem)
+      : problem_(problem) {}
+  virtual ~GradientProblemEvaluator() {}
+  virtual SparseMatrix* CreateJacobian() const { return NULL; }
+  virtual bool Evaluate(const EvaluateOptions& evaluate_options,
+                        const double* state,
+                        double* cost,
+                        double* residuals,
+                        double* gradient,
+                        SparseMatrix* jacobian) {
+    CHECK(jacobian == NULL);
+    ScopedExecutionTimer total_timer("Evaluator::Total", &execution_summary_);
+    // The reason we use Residual and Jacobian here even when we are
+    // only computing the cost and gradient is that the line search
+    // minimizer code is used by both the GradientProblemSolver and the
+    // main Ceres Solver code, where the Evaluator evaluates the
+    // Jacobian, and these magic strings need to be consistent across
+    // the code base for the time accounting to work.
+    ScopedExecutionTimer call_type_timer(
+        gradient == NULL ? "Evaluator::Residual" : "Evaluator::Jacobian",
+        &execution_summary_);
+    return problem_.Evaluate(state, cost, gradient);
+  }
+
+  virtual bool Plus(const double* state,
+                    const double* delta,
+                    double* state_plus_delta) const {
+    return problem_.Plus(state, delta, state_plus_delta);
+  }
+
+  virtual int NumParameters() const {
+    return problem_.NumParameters();
+  }
+
+  virtual int NumEffectiveParameters() const {
+    return problem_.NumLocalParameters();
+  }
+
+  virtual int NumResiduals() const { return 1; }
+
+  virtual std::map<std::string, internal::CallStatistics> Statistics() const {
+    return execution_summary_.statistics();
+  }
+
+ private:
+  const GradientProblem& problem_;
+  ::ceres::internal::ExecutionSummary execution_summary_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_GRADIENT_PROBLEM_EVALUATOR_H_
diff --git a/internal/ceres/gradient_problem_solver.cc b/internal/ceres/gradient_problem_solver.cc
new file mode 100644
index 0000000..1639e30
--- /dev/null
+++ b/internal/ceres/gradient_problem_solver.cc
@@ -0,0 +1,286 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/gradient_problem_solver.h"
+
+#include <memory>
+#include "ceres/callbacks.h"
+#include "ceres/gradient_problem.h"
+#include "ceres/gradient_problem_evaluator.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/port.h"
+#include "ceres/map_util.h"
+#include "ceres/minimizer.h"
+#include "ceres/solver.h"
+#include "ceres/solver_utils.h"
+#include "ceres/stringprintf.h"
+#include "ceres/types.h"
+#include "ceres/wall_time.h"
+
+namespace ceres {
+using internal::StringPrintf;
+using internal::StringAppendF;
+using std::string;
+
+namespace {
+
+Solver::Options GradientProblemSolverOptionsToSolverOptions(
+    const GradientProblemSolver::Options& options) {
+#define COPY_OPTION(x) solver_options.x = options.x
+
+  Solver::Options solver_options;
+  solver_options.minimizer_type = LINE_SEARCH;
+  COPY_OPTION(line_search_direction_type);
+  COPY_OPTION(line_search_type);
+  COPY_OPTION(nonlinear_conjugate_gradient_type);
+  COPY_OPTION(max_lbfgs_rank);
+  COPY_OPTION(use_approximate_eigenvalue_bfgs_scaling);
+  COPY_OPTION(line_search_interpolation_type);
+  COPY_OPTION(min_line_search_step_size);
+  COPY_OPTION(line_search_sufficient_function_decrease);
+  COPY_OPTION(max_line_search_step_contraction);
+  COPY_OPTION(min_line_search_step_contraction);
+  COPY_OPTION(max_num_line_search_step_size_iterations);
+  COPY_OPTION(max_num_line_search_direction_restarts);
+  COPY_OPTION(line_search_sufficient_curvature_decrease);
+  COPY_OPTION(max_line_search_step_expansion);
+  COPY_OPTION(max_num_iterations);
+  COPY_OPTION(max_solver_time_in_seconds);
+  COPY_OPTION(parameter_tolerance);
+  COPY_OPTION(function_tolerance);
+  COPY_OPTION(gradient_tolerance);
+  COPY_OPTION(logging_type);
+  COPY_OPTION(minimizer_progress_to_stdout);
+  COPY_OPTION(callbacks);
+  return solver_options;
+#undef COPY_OPTION
+}
+
+
+}  // namespace
+
+bool GradientProblemSolver::Options::IsValid(std::string* error) const {
+  const Solver::Options solver_options =
+      GradientProblemSolverOptionsToSolverOptions(*this);
+  return solver_options.IsValid(error);
+}
+
+GradientProblemSolver::~GradientProblemSolver() {
+}
+
+void GradientProblemSolver::Solve(const GradientProblemSolver::Options& options,
+                                  const GradientProblem& problem,
+                                  double* parameters_ptr,
+                                  GradientProblemSolver::Summary* summary) {
+  using internal::CallStatistics;
+  using internal::GradientProblemEvaluator;
+  using internal::GradientProblemSolverStateUpdatingCallback;
+  using internal::LoggingCallback;
+  using internal::Minimizer;
+  using internal::SetSummaryFinalCost;
+  using internal::WallTimeInSeconds;
+
+  double start_time = WallTimeInSeconds();
+
+  CHECK(summary != nullptr);
+  *summary = Summary();
+  summary->num_parameters                    = problem.NumParameters();
+  summary->num_local_parameters              = problem.NumLocalParameters();
+  summary->line_search_direction_type        = options.line_search_direction_type;         //  NOLINT
+  summary->line_search_interpolation_type    = options.line_search_interpolation_type;     //  NOLINT
+  summary->line_search_type                  = options.line_search_type;
+  summary->max_lbfgs_rank                    = options.max_lbfgs_rank;
+  summary->nonlinear_conjugate_gradient_type = options.nonlinear_conjugate_gradient_type;  //  NOLINT
+
+  // Check validity
+  if (!options.IsValid(&summary->message)) {
+    LOG(ERROR) << "Terminating: " << summary->message;
+    return;
+  }
+
+  VectorRef parameters(parameters_ptr, problem.NumParameters());
+  Vector solution(problem.NumParameters());
+  solution = parameters;
+
+  // TODO(sameeragarwal): This is a bit convoluted, we should be able
+  // to convert to minimizer options directly, but this will do for
+  // now.
+  Minimizer::Options minimizer_options =
+      Minimizer::Options(GradientProblemSolverOptionsToSolverOptions(options));
+  minimizer_options.evaluator.reset(new GradientProblemEvaluator(problem));
+
+  std::unique_ptr<IterationCallback> logging_callback;
+  if (options.logging_type != SILENT) {
+    logging_callback.reset(
+        new LoggingCallback(LINE_SEARCH, options.minimizer_progress_to_stdout));
+    minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(),
+                                       logging_callback.get());
+  }
+
+  std::unique_ptr<IterationCallback> state_updating_callback;
+  if (options.update_state_every_iteration) {
+    state_updating_callback.reset(
+        new GradientProblemSolverStateUpdatingCallback(
+            problem.NumParameters(), solution.data(), parameters_ptr));
+    minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(),
+                                       state_updating_callback.get());
+  }
+
+  std::unique_ptr<Minimizer> minimizer(Minimizer::Create(LINE_SEARCH));
+
+  Solver::Summary solver_summary;
+  solver_summary.fixed_cost = 0.0;
+  solver_summary.preprocessor_time_in_seconds = 0.0;
+  solver_summary.postprocessor_time_in_seconds = 0.0;
+  solver_summary.line_search_polynomial_minimization_time_in_seconds = 0.0;
+
+  minimizer->Minimize(minimizer_options, solution.data(), &solver_summary);
+
+  summary->termination_type = solver_summary.termination_type;
+  summary->message          = solver_summary.message;
+  summary->initial_cost     = solver_summary.initial_cost;
+  summary->final_cost       = solver_summary.final_cost;
+  summary->iterations       = solver_summary.iterations;
+  summary->line_search_polynomial_minimization_time_in_seconds =
+      solver_summary.line_search_polynomial_minimization_time_in_seconds;
+
+  if (summary->IsSolutionUsable()) {
+    parameters = solution;
+    SetSummaryFinalCost(summary);
+  }
+
+  const std::map<string, CallStatistics>& evaluator_statistics =
+      minimizer_options.evaluator->Statistics();
+  {
+    const CallStatistics& call_stats = FindWithDefault(
+        evaluator_statistics, "Evaluator::Residual", CallStatistics());
+    summary->cost_evaluation_time_in_seconds = call_stats.time;
+    summary->num_cost_evaluations = call_stats.calls;
+  }
+
+  {
+    const CallStatistics& call_stats = FindWithDefault(
+        evaluator_statistics, "Evaluator::Jacobian", CallStatistics());
+    summary->gradient_evaluation_time_in_seconds = call_stats.time;
+    summary->num_gradient_evaluations = call_stats.calls;
+  }
+
+  summary->total_time_in_seconds = WallTimeInSeconds() - start_time;
+}
+
+bool GradientProblemSolver::Summary::IsSolutionUsable() const {
+  return internal::IsSolutionUsable(*this);
+}
+
+string GradientProblemSolver::Summary::BriefReport() const {
+  return StringPrintf("Ceres GradientProblemSolver Report: "
+                      "Iterations: %d, "
+                      "Initial cost: %e, "
+                      "Final cost: %e, "
+                      "Termination: %s",
+                      static_cast<int>(iterations.size()),
+                      initial_cost,
+                      final_cost,
+                      TerminationTypeToString(termination_type));
+}
+
+string GradientProblemSolver::Summary::FullReport() const {
+  using internal::VersionString;
+
+  string report = string("\nSolver Summary (v " + VersionString() + ")\n\n");
+
+  StringAppendF(&report, "Parameters          % 25d\n", num_parameters);
+  if (num_local_parameters != num_parameters) {
+    StringAppendF(&report, "Local parameters    % 25d\n",
+                  num_local_parameters);
+  }
+
+  string line_search_direction_string;
+  if (line_search_direction_type == LBFGS) {
+    line_search_direction_string = StringPrintf("LBFGS (%d)", max_lbfgs_rank);
+  } else if (line_search_direction_type == NONLINEAR_CONJUGATE_GRADIENT) {
+    line_search_direction_string =
+        NonlinearConjugateGradientTypeToString(
+            nonlinear_conjugate_gradient_type);
+  } else {
+    line_search_direction_string =
+        LineSearchDirectionTypeToString(line_search_direction_type);
+  }
+
+  StringAppendF(&report, "Line search direction     %19s\n",
+                line_search_direction_string.c_str());
+
+  const string line_search_type_string =
+      StringPrintf("%s %s",
+                   LineSearchInterpolationTypeToString(
+                       line_search_interpolation_type),
+                   LineSearchTypeToString(line_search_type));
+  StringAppendF(&report, "Line search type          %19s\n",
+                line_search_type_string.c_str());
+  StringAppendF(&report, "\n");
+
+  StringAppendF(&report, "\nCost:\n");
+  StringAppendF(&report, "Initial        % 30e\n", initial_cost);
+  if (termination_type != FAILURE &&
+      termination_type != USER_FAILURE) {
+    StringAppendF(&report, "Final          % 30e\n", final_cost);
+    StringAppendF(&report, "Change         % 30e\n",
+                  initial_cost - final_cost);
+  }
+
+  StringAppendF(&report, "\nMinimizer iterations         % 16d\n",
+                static_cast<int>(iterations.size()));
+
+  StringAppendF(&report, "\nTime (in seconds):\n");
+  StringAppendF(&report, "\n  Cost evaluation     %23.6f (%d)\n",
+                cost_evaluation_time_in_seconds,
+                num_cost_evaluations);
+  StringAppendF(&report, "  Gradient & cost evaluation %16.6f (%d)\n",
+                gradient_evaluation_time_in_seconds,
+                num_gradient_evaluations);
+  StringAppendF(&report, "  Polynomial minimization   %17.6f\n",
+                line_search_polynomial_minimization_time_in_seconds);
+  StringAppendF(&report, "Total               %25.6f\n\n",
+                total_time_in_seconds);
+
+  StringAppendF(&report, "Termination:        %25s (%s)\n",
+                TerminationTypeToString(termination_type), message.c_str());
+  return report;
+}
+
+void Solve(const GradientProblemSolver::Options& options,
+           const GradientProblem& problem,
+           double* parameters,
+           GradientProblemSolver::Summary* summary) {
+  GradientProblemSolver solver;
+  solver.Solve(options, problem, parameters, summary);
+}
+
+}  // namespace ceres
diff --git a/internal/ceres/gradient_problem_solver_test.cc b/internal/ceres/gradient_problem_solver_test.cc
new file mode 100644
index 0000000..20574de
--- /dev/null
+++ b/internal/ceres/gradient_problem_solver_test.cc
@@ -0,0 +1,136 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: strandmark@google.com (Petter Strandmark)
+
+#include "ceres/gradient_problem.h"
+#include "ceres/gradient_problem_solver.h"
+
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+// Rosenbrock function; see http://en.wikipedia.org/wiki/Rosenbrock_function .
+class Rosenbrock : public ceres::FirstOrderFunction {
+ public:
+  virtual ~Rosenbrock() {}
+
+  virtual bool Evaluate(const double* parameters,
+                        double* cost,
+                        double* gradient) const {
+    const double x = parameters[0];
+    const double y = parameters[1];
+
+    cost[0] = (1.0 - x) * (1.0 - x) + 100.0 * (y - x * x) * (y - x * x);
+    if (gradient != NULL) {
+      gradient[0] = -2.0 * (1.0 - x) - 200.0 * (y - x * x) * 2.0 * x;
+      gradient[1] = 200.0 * (y - x * x);
+    }
+    return true;
+  }
+
+  virtual int NumParameters() const { return 2; }
+};
+
+TEST(GradientProblemSolver, SolvesRosenbrockWithDefaultOptions) {
+  const double expected_tolerance = 1e-9;
+  double parameters[2] = {-1.2, 0.0};
+
+  ceres::GradientProblemSolver::Options options;
+  ceres::GradientProblemSolver::Summary summary;
+  ceres::GradientProblem problem(new Rosenbrock());
+  ceres::Solve(options, problem, parameters, &summary);
+
+  EXPECT_EQ(CONVERGENCE, summary.termination_type);
+  EXPECT_NEAR(1.0, parameters[0], expected_tolerance);
+  EXPECT_NEAR(1.0, parameters[1], expected_tolerance);
+}
+
+class QuadraticFunction : public ceres::FirstOrderFunction {
+  virtual ~QuadraticFunction() {}
+  virtual bool Evaluate(const double* parameters,
+                        double* cost,
+                        double* gradient) const {
+    const double x = parameters[0];
+    *cost = 0.5 * (5.0 - x) * (5.0 - x);
+    if (gradient != NULL) {
+      gradient[0] = x - 5.0;
+    }
+
+    return true;
+  }
+  virtual int NumParameters() const { return 1; }
+};
+
+struct RememberingCallback : public IterationCallback {
+  explicit RememberingCallback(double *x) : calls(0), x(x) {}
+  virtual ~RememberingCallback() {}
+  virtual CallbackReturnType operator()(const IterationSummary& summary) {
+    x_values.push_back(*x);
+    return SOLVER_CONTINUE;
+  }
+  int calls;
+  double *x;
+  std::vector<double> x_values;
+};
+
+
+TEST(Solver, UpdateStateEveryIterationOption) {
+  double x = 50.0;
+  const double original_x = x;
+
+  ceres::GradientProblem problem(new QuadraticFunction);
+  ceres::GradientProblemSolver::Options options;
+  RememberingCallback callback(&x);
+  options.callbacks.push_back(&callback);
+  ceres::GradientProblemSolver::Summary summary;
+
+  int num_iterations;
+
+  // First try: no updating.
+  ceres::Solve(options, problem, &x, &summary);
+  num_iterations = summary.iterations.size() - 1;
+  EXPECT_GT(num_iterations, 1);
+  for (int i = 0; i < callback.x_values.size(); ++i) {
+    EXPECT_EQ(50.0, callback.x_values[i]);
+  }
+
+  // Second try: with updating
+  x = 50.0;
+  options.update_state_every_iteration = true;
+  callback.x_values.clear();
+  ceres::Solve(options, problem, &x, &summary);
+  num_iterations = summary.iterations.size() - 1;
+  EXPECT_GT(num_iterations, 1);
+  EXPECT_EQ(original_x, callback.x_values[0]);
+  EXPECT_NE(original_x, callback.x_values[1]);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/gradient_problem_test.cc b/internal/ceres/gradient_problem_test.cc
new file mode 100644
index 0000000..b352577
--- /dev/null
+++ b/internal/ceres/gradient_problem_test.cc
@@ -0,0 +1,111 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: strandmark@google.com (Petter Strandmark)
+
+#include "ceres/gradient_problem.h"
+
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+class QuadraticTestFunction : public ceres::FirstOrderFunction {
+ public:
+  explicit QuadraticTestFunction(bool* flag_to_set_on_destruction = NULL)
+      : flag_to_set_on_destruction_(flag_to_set_on_destruction) {}
+
+  virtual ~QuadraticTestFunction() {
+    if (flag_to_set_on_destruction_) {
+      *flag_to_set_on_destruction_ = true;
+    }
+  }
+
+  virtual bool Evaluate(const double* parameters,
+                        double* cost,
+                        double* gradient) const {
+    const double x = parameters[0];
+    cost[0] = x * x;
+    if (gradient != NULL) {
+      gradient[0] = 2.0 * x;
+    }
+    return true;
+  }
+
+  virtual int NumParameters() const { return 1; }
+
+ private:
+  bool* flag_to_set_on_destruction_;
+};
+
+TEST(GradientProblem, TakesOwnershipOfFirstOrderFunction) {
+  bool is_destructed = false;
+  {
+    ceres::GradientProblem problem(new QuadraticTestFunction(&is_destructed));
+  }
+  EXPECT_TRUE(is_destructed);
+}
+
+TEST(GradientProblem, EvaluationWithoutParameterizationOrGradient) {
+  ceres::GradientProblem problem(new QuadraticTestFunction());
+  double x = 7.0;
+  double cost = 0;
+  problem.Evaluate(&x, &cost, NULL);
+  EXPECT_EQ(x * x, cost);
+}
+
+TEST(GradientProblem, EvaluationWithParameterizationAndNoGradient) {
+  ceres::GradientProblem problem(new QuadraticTestFunction(),
+                                 new IdentityParameterization(1));
+  double x = 7.0;
+  double cost = 0;
+  problem.Evaluate(&x, &cost, NULL);
+  EXPECT_EQ(x * x, cost);
+}
+
+TEST(GradientProblem, EvaluationWithoutParameterizationAndWithGradient) {
+  ceres::GradientProblem problem(new QuadraticTestFunction());
+  double x = 7.0;
+  double cost = 0;
+  double gradient = 0;
+  problem.Evaluate(&x, &cost, &gradient);
+  EXPECT_EQ(2.0 * x, gradient);
+}
+
+TEST(GradientProblem, EvaluationWithParameterizationAndWithGradient) {
+  ceres::GradientProblem problem(new QuadraticTestFunction(),
+                                 new IdentityParameterization(1));
+  double x = 7.0;
+  double cost = 0;
+  double gradient = 0;
+  problem.Evaluate(&x, &cost, &gradient);
+  EXPECT_EQ(2.0 * x, gradient);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/graph.h b/internal/ceres/graph.h
new file mode 100644
index 0000000..4e1fd81
--- /dev/null
+++ b/internal/ceres/graph.h
@@ -0,0 +1,220 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_GRAPH_H_
+#define CERES_INTERNAL_GRAPH_H_
+
+#include <limits>
+#include <unordered_set>
+#include <unordered_map>
+#include <utility>
+#include "ceres/map_util.h"
+#include "ceres/pair_hash.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+// An unweighted undirected graph templated over the vertex ids. Vertex
+// should be hashable.
+template <typename Vertex>
+class Graph {
+ public:
+  Graph() {}
+
+  // Add a vertex.
+  void AddVertex(const Vertex& vertex) {
+    if (vertices_.insert(vertex).second) {
+      edges_[vertex] = std::unordered_set<Vertex>();
+    }
+  }
+
+  bool RemoveVertex(const Vertex& vertex) {
+    if (vertices_.find(vertex) == vertices_.end()) {
+      return false;
+    }
+
+    vertices_.erase(vertex);
+    const std::unordered_set<Vertex>& sinks = edges_[vertex];
+    for (const Vertex& s : sinks) {
+      edges_[s].erase(vertex);
+    }
+
+    edges_.erase(vertex);
+    return true;
+  }
+
+  // Add an edge between vertex1 and vertex2. Calling AddEdge on a
+  // pair of vertices which do not yet exist in the graph results in
+  // undefined behavior.
+  //
+  // It is legal to call this method repeatedly for the same set of
+  // vertices.
+  void AddEdge(const Vertex& vertex1, const Vertex& vertex2) {
+    DCHECK(vertices_.find(vertex1) != vertices_.end());
+    DCHECK(vertices_.find(vertex2) != vertices_.end());
+
+    if (edges_[vertex1].insert(vertex2).second) {
+      edges_[vertex2].insert(vertex1);
+    }
+  }
+
+  // Calling Neighbors on a vertex not in the graph will result in
+  // undefined behaviour.
+  const std::unordered_set<Vertex>& Neighbors(const Vertex& vertex) const {
+    return FindOrDie(edges_, vertex);
+  }
+
+  const std::unordered_set<Vertex>& vertices() const {
+    return vertices_;
+  }
+
+ private:
+  std::unordered_set<Vertex> vertices_;
+  std::unordered_map<Vertex, std::unordered_set<Vertex>> edges_;
+};
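+
+// A minimal usage sketch (illustrative, with integer vertex ids):
+//
+//   Graph<int> graph;
+//   graph.AddVertex(0);
+//   graph.AddVertex(1);
+//   graph.AddEdge(0, 1);   // Both endpoints must already be in the graph.
+//   graph.Neighbors(0);    // Returns the set {1}.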
+
+// A weighted undirected graph templated over the vertex ids. Vertex
+// should be hashable and comparable.
+template <typename Vertex>
+class WeightedGraph {
+ public:
+  WeightedGraph() {}
+
+  // Add a weighted vertex. If the vertex already exists in the graph,
+  // its weight is set to the new weight.
+  void AddVertex(const Vertex& vertex, double weight) {
+    if (vertices_.find(vertex) == vertices_.end()) {
+      vertices_.insert(vertex);
+      edges_[vertex] = std::unordered_set<Vertex>();
+    }
+    vertex_weights_[vertex] = weight;
+  }
+
+  // Uses weight = 1.0. If vertex already exists, its weight is set to
+  // 1.0.
+  void AddVertex(const Vertex& vertex) {
+    AddVertex(vertex, 1.0);
+  }
+
+  bool RemoveVertex(const Vertex& vertex) {
+    if (vertices_.find(vertex) == vertices_.end()) {
+      return false;
+    }
+
+    vertices_.erase(vertex);
+    vertex_weights_.erase(vertex);
+    const std::unordered_set<Vertex>& sinks = edges_[vertex];
+    for (const Vertex& s : sinks) {
+      if (vertex < s) {
+        edge_weights_.erase(std::make_pair(vertex, s));
+      } else {
+        edge_weights_.erase(std::make_pair(s, vertex));
+      }
+      edges_[s].erase(vertex);
+    }
+
+    edges_.erase(vertex);
+    return true;
+  }
+
+  // Add a weighted edge between vertex1 and vertex2. Calling AddEdge
+  // on a pair of vertices which do not yet exist in the graph results
+  // in undefined behavior.
+  //
+  // It is legal to call this method repeatedly for the same set of
+  // vertices.
+  void AddEdge(const Vertex& vertex1, const Vertex& vertex2, double weight) {
+    DCHECK(vertices_.find(vertex1) != vertices_.end());
+    DCHECK(vertices_.find(vertex2) != vertices_.end());
+
+    if (edges_[vertex1].insert(vertex2).second) {
+      edges_[vertex2].insert(vertex1);
+    }
+
+    if (vertex1 < vertex2) {
+      edge_weights_[std::make_pair(vertex1, vertex2)] = weight;
+    } else {
+      edge_weights_[std::make_pair(vertex2, vertex1)] = weight;
+    }
+  }
+
+  // Uses weight = 1.0.
+  void AddEdge(const Vertex& vertex1, const Vertex& vertex2) {
+    AddEdge(vertex1, vertex2, 1.0);
+  }
+
+  // Calling VertexWeight on a vertex not in the graph will result in
+  // undefined behavior.
+  double VertexWeight(const Vertex& vertex) const {
+    return FindOrDie(vertex_weights_, vertex);
+  }
+
+  // Calling EdgeWeight on a pair of vertices where either one of the
+  // vertices is not present in the graph will result in undefined
+  // behaviour. If there is no edge connecting vertex1 and vertex2,
+  // the edge weight is zero.
+  double EdgeWeight(const Vertex& vertex1, const Vertex& vertex2) const {
+    if (vertex1 < vertex2) {
+      return FindWithDefault(edge_weights_,
+                             std::make_pair(vertex1, vertex2), 0.0);
+    } else {
+      return FindWithDefault(edge_weights_,
+                             std::make_pair(vertex2, vertex1), 0.0);
+    }
+  }
+
+  // Calling Neighbors on a vertex not in the graph will result in
+  // undefined behaviour.
+  const std::unordered_set<Vertex>& Neighbors(const Vertex& vertex) const {
+    return FindOrDie(edges_, vertex);
+  }
+
+  const std::unordered_set<Vertex>& vertices() const {
+    return vertices_;
+  }
+
+  static double InvalidWeight() {
+    return std::numeric_limits<double>::quiet_NaN();
+  }
+
+ private:
+  std::unordered_set<Vertex> vertices_;
+  std::unordered_map<Vertex, double> vertex_weights_;
+  std::unordered_map<Vertex, std::unordered_set<Vertex>> edges_;
+  std::unordered_map<std::pair<Vertex, Vertex>, double, pair_hash>
+      edge_weights_;
+};
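+
+// A minimal usage sketch (illustrative, with integer vertex ids):
+//
+//   WeightedGraph<int> graph;
+//   graph.AddVertex(0, 1.0);
+//   graph.AddVertex(1, 2.0);
+//   graph.AddEdge(0, 1, 0.5);
+//   graph.VertexWeight(1);    // 2.0.
+//   graph.EdgeWeight(0, 1);   // 0.5, independent of the argument order.
+//   graph.EdgeWeight(1, 0);   // Also 0.5.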
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_GRAPH_H_
diff --git a/internal/ceres/graph_algorithms.h b/internal/ceres/graph_algorithms.h
new file mode 100644
index 0000000..b062931
--- /dev/null
+++ b/internal/ceres/graph_algorithms.h
@@ -0,0 +1,344 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Various algorithms that operate on undirected graphs.
+
+#ifndef CERES_INTERNAL_GRAPH_ALGORITHMS_H_
+#define CERES_INTERNAL_GRAPH_ALGORITHMS_H_
+
+#include <algorithm>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+#include <utility>
+#include "ceres/graph.h"
+#include "ceres/wall_time.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+// Compare two vertices of a graph by their degrees; if the degrees
+// are equal, then order them by their ids.
+template <typename Vertex>
+class VertexTotalOrdering {
+ public:
+  explicit VertexTotalOrdering(const Graph<Vertex>& graph)
+      : graph_(graph) {}
+
+  bool operator()(const Vertex& lhs, const Vertex& rhs) const {
+    if (graph_.Neighbors(lhs).size() == graph_.Neighbors(rhs).size()) {
+      return lhs < rhs;
+    }
+    return graph_.Neighbors(lhs).size() < graph_.Neighbors(rhs).size();
+  }
+
+ private:
+  const Graph<Vertex>& graph_;
+};
+
+template <typename Vertex>
+class VertexDegreeLessThan {
+ public:
+  explicit VertexDegreeLessThan(const Graph<Vertex>& graph)
+      : graph_(graph) {}
+
+  bool operator()(const Vertex& lhs, const Vertex& rhs) const {
+    return graph_.Neighbors(lhs).size() < graph_.Neighbors(rhs).size();
+  }
+
+ private:
+  const Graph<Vertex>& graph_;
+};
+
+// Order the vertices of a graph using its (approximately) largest
+// independent set, where an independent set of a graph is a set of
+// vertices that have no edges connecting them. The maximum
+// independent set problem is NP-Hard, but there are effective
+// approximation algorithms available. The implementation here uses a
+// breadth first search that explores the vertices in order of
+// increasing degree. The same idea is used by Saad & Li in "MIQR: A
+// multilevel incomplete QR preconditioner for large sparse
+// least-squares problems", SIMAX, 2007.
+//
+// Given an undirected graph G(V,E), the algorithm is a greedy BFS
+// in which the vertices are explored in increasing order of their
+// degree. The output vector ordering contains elements of S in
+// increasing order of their degree, followed by elements of V - S in
+// increasing order of degree. The return value of the function is the
+// cardinality of S.
+template <typename Vertex>
+int IndependentSetOrdering(const Graph<Vertex>& graph,
+                           std::vector<Vertex>* ordering) {
+  const std::unordered_set<Vertex>& vertices = graph.vertices();
+  const int num_vertices = vertices.size();
+
+  CHECK(ordering != nullptr);
+  ordering->clear();
+  ordering->reserve(num_vertices);
+
+  // Colors for labeling the graph during the BFS.
+  const char kWhite = 0;
+  const char kGrey = 1;
+  const char kBlack = 2;
+
+  // Mark all vertices white.
+  std::unordered_map<Vertex, char> vertex_color;
+  std::vector<Vertex> vertex_queue;
+  for (const Vertex& vertex : vertices) {
+    vertex_color[vertex] = kWhite;
+    vertex_queue.push_back(vertex);
+  }
+
+  std::sort(vertex_queue.begin(),
+            vertex_queue.end(),
+            VertexTotalOrdering<Vertex>(graph));
+
+  // Iterate over vertex_queue. Pick the first white vertex, add it
+  // to the independent set. Mark it black and its neighbors grey.
+  for (const Vertex& vertex : vertex_queue) {
+    if (vertex_color[vertex] != kWhite) {
+      continue;
+    }
+
+    ordering->push_back(vertex);
+    vertex_color[vertex] = kBlack;
+    const std::unordered_set<Vertex>& neighbors = graph.Neighbors(vertex);
+    for (const Vertex& neighbor : neighbors) {
+      vertex_color[neighbor] = kGrey;
+    }
+  }
+
+  int independent_set_size = ordering->size();
+
+  // Iterate over the vertices and add all the grey vertices to the
+  // ordering. At this stage there should only be black or grey
+  // vertices in the graph.
+  for (const Vertex& vertex : vertex_queue) {
+    DCHECK(vertex_color[vertex] != kWhite);
+    if (vertex_color[vertex] != kBlack) {
+      ordering->push_back(vertex);
+    }
+  }
+
+  CHECK_EQ(ordering->size(), num_vertices);
+  return independent_set_size;
+}
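+
+// A minimal usage sketch (illustrative): for the chain 0-1-2-3-4 the
+// greedy BFS selects {0, 2, 4} as the independent set.
+//
+//   Graph<int> graph;
+//   for (int i = 0; i < 5; ++i) graph.AddVertex(i);
+//   for (int i = 0; i < 4; ++i) graph.AddEdge(i, i + 1);
+//   std::vector<int> ordering;
+//   IndependentSetOrdering(graph, &ordering);
+//   // Returns 3; the first three entries of ordering are 0, 2 and 4
+//   // (in some order), followed by 1 and 3.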
+
+// Same as above with one important difference. The ordering parameter
+// is an input/output parameter which carries an initial ordering of
+// the vertices of the graph. The greedy independent set algorithm
+// starts by sorting the vertices in increasing order of their
+// degree. The input ordering is used to stabilize this sort, i.e., if
+// two vertices have the same degree then they are ordered in the same
+// order in which they occur in "ordering".
+//
+// This is useful for eliminating non-determinism from the Schur
+// ordering algorithm overall.
+template <typename Vertex>
+int StableIndependentSetOrdering(const Graph<Vertex>& graph,
+                                 std::vector<Vertex>* ordering) {
+  CHECK(ordering != nullptr);
+  const std::unordered_set<Vertex>& vertices = graph.vertices();
+  const int num_vertices = vertices.size();
+  CHECK_EQ(vertices.size(), ordering->size());
+
+  // Colors for labeling the graph during the BFS.
+  const char kWhite = 0;
+  const char kGrey = 1;
+  const char kBlack = 2;
+
+  std::vector<Vertex> vertex_queue(*ordering);
+
+  std::stable_sort(vertex_queue.begin(), vertex_queue.end(),
+                   VertexDegreeLessThan<Vertex>(graph));
+
+  // Mark all vertices white.
+  std::unordered_map<Vertex, char> vertex_color;
+  for (const Vertex& vertex : vertices) {
+    vertex_color[vertex] = kWhite;
+  }
+
+  ordering->clear();
+  ordering->reserve(num_vertices);
+  // Iterate over vertex_queue. Pick the first white vertex, add it
+  // to the independent set. Mark it black and its neighbors grey.
+  for (int i = 0; i < vertex_queue.size(); ++i) {
+    const Vertex& vertex = vertex_queue[i];
+    if (vertex_color[vertex] != kWhite) {
+      continue;
+    }
+
+    ordering->push_back(vertex);
+    vertex_color[vertex] = kBlack;
+    const std::unordered_set<Vertex>& neighbors = graph.Neighbors(vertex);
+    for (const Vertex& neighbor : neighbors) {
+      vertex_color[neighbor] = kGrey;
+    }
+  }
+
+  int independent_set_size = ordering->size();
+
+  // Iterate over the vertices and add all the grey vertices to the
+  // ordering. At this stage there should only be black or grey
+  // vertices in the graph.
+  for (const Vertex& vertex : vertex_queue) {
+    DCHECK(vertex_color[vertex] != kWhite);
+    if (vertex_color[vertex] != kBlack) {
+      ordering->push_back(vertex);
+    }
+  }
+
+  CHECK_EQ(ordering->size(), num_vertices);
+  return independent_set_size;
+}
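+
+// A minimal usage sketch (illustrative; "graph" below denotes a complete
+// Graph<int> over {0, 1, 2, 3}): all degrees are equal, so the input
+// ordering decides which single vertex forms the independent set.
+//
+//   std::vector<int> ordering = {1, 0, 2, 3};
+//   const int size = StableIndependentSetOrdering(graph, &ordering);
+//   // size == 1 and ordering[0] == 1, because vertex 1 appears first
+//   // in the input ordering.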
+
+// Find the connected component for a vertex, implemented using the
+// find-and-update operation of a disjoint-set. Recursively traverse
+// the disjoint-set structure until reaching a vertex whose connected
+// component id is the vertex itself. Along the way, update the
+// connected component ids of all the vertices visited (path
+// compression); this updating is what gives the data structure its
+// efficiency.
+template <typename Vertex>
+Vertex FindConnectedComponent(const Vertex& vertex,
+                              std::unordered_map<Vertex, Vertex>* union_find) {
+  auto it = union_find->find(vertex);
+  DCHECK(it != union_find->end());
+  if (it->second != vertex) {
+    it->second = FindConnectedComponent(it->second, union_find);
+  }
+
+  return it->second;
+}
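+
+// A minimal usage sketch (illustrative): each vertex is initially its own
+// root; pointing a root at another root merges the two components, and
+// subsequent lookups compress the paths they traverse.
+//
+//   std::unordered_map<int, int> union_find;
+//   union_find[0] = 0;
+//   union_find[1] = 1;
+//   union_find[2] = 2;
+//   union_find[1] = 0;                        // Merge component 1 into 0.
+//   union_find[2] = 1;                        // Merge component 2 into 1.
+//   FindConnectedComponent(2, &union_find);   // Returns 0 and rewrites
+//                                             // union_find[2] to 0.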
+
+// Compute a degree two constrained Maximum Spanning Tree/forest of
+// the input graph. Caller owns the result.
+//
+// Finding a degree 2 spanning tree of a graph is not always
+// possible. For example, a star graph, i.e., a graph with n nodes
+// where one node is connected to the other n-1 nodes, does not have
+// any spanning tree of degree less than n-1. Even if such a tree
+// exists, finding it is NP-Hard.
+//
+// We get around both of these problems by using a greedy, degree
+// constrained variant of Kruskal's algorithm. We start with a graph
+// G_T with the same vertex set V as the input graph G(V,E) but an
+// empty edge set. We then iterate over the edges of G in decreasing
+// order of weight, adding them to G_T if doing so does not create a
+// cycle in G_T and the degree of all the vertices in G_T remains
+// bounded by two. This greedy algorithm results in a degree-2
+// spanning forest, i.e., a collection of linear paths that span the
+// graph G.
+template <typename Vertex>
+WeightedGraph<Vertex>*
+Degree2MaximumSpanningForest(const WeightedGraph<Vertex>& graph) {
+  // Edges of the graph, to be sorted in decreasing order of their weights.
+  std::vector<std::pair<double, std::pair<Vertex, Vertex>>> weighted_edges;
+  WeightedGraph<Vertex>* forest = new WeightedGraph<Vertex>();
+
+  // Disjoint-set to keep track of the connected components in the
+  // maximum spanning tree.
+  std::unordered_map<Vertex, Vertex> disjoint_set;
+
+  // Collect the edges of the graph so that they can be sorted in
+  // decreasing order of their weight. Also add the vertices of the
+  // graph to the Maximum
+  // Spanning Tree graph and set each vertex to be its own connected
+  // component in the disjoint_set structure.
+  const std::unordered_set<Vertex>& vertices = graph.vertices();
+  for (const Vertex& vertex1 : vertices) {
+    forest->AddVertex(vertex1, graph.VertexWeight(vertex1));
+    disjoint_set[vertex1] = vertex1;
+
+    const std::unordered_set<Vertex>& neighbors = graph.Neighbors(vertex1);
+    for (const Vertex& vertex2 : neighbors) {
+      if (vertex1 >= vertex2) {
+        continue;
+      }
+      const double weight = graph.EdgeWeight(vertex1, vertex2);
+      weighted_edges.push_back(
+          std::make_pair(weight, std::make_pair(vertex1, vertex2)));
+    }
+  }
+
+  // The elements of this vector are pairs of the form <edge_weight,
+  // edge>. Sorting it using reverse iterators gives us the edges in
+  // decreasing order of weight.
+  std::sort(weighted_edges.rbegin(), weighted_edges.rend());
+
+  // Greedily add edges to the spanning tree/forest as long as they do
+  // not violate the degree/cycle constraint.
+  for (int i = 0; i < weighted_edges.size(); ++i) {
+    const std::pair<Vertex, Vertex>& edge = weighted_edges[i].second;
+    const Vertex vertex1 = edge.first;
+    const Vertex vertex2 = edge.second;
+
+    // Check if either of the vertices are of degree 2 already, in
+    // which case adding this edge will violate the degree 2
+    // constraint.
+    if ((forest->Neighbors(vertex1).size() == 2) ||
+        (forest->Neighbors(vertex2).size() == 2)) {
+      continue;
+    }
+
+    // Find the id of the connected component to which the two
+    // vertices belong to. If the id is the same, it means that the
+    // two of them are already connected to each other via some other
+    // vertex, and adding this edge will create a cycle.
+    Vertex root1 = FindConnectedComponent(vertex1, &disjoint_set);
+    Vertex root2 = FindConnectedComponent(vertex2, &disjoint_set);
+
+    if (root1 == root2) {
+      continue;
+    }
+
+    // This edge can be added; add it in both directions with the
+    // same weight as in the original graph.
+    const double edge_weight = graph.EdgeWeight(vertex1, vertex2);
+    forest->AddEdge(vertex1, vertex2, edge_weight);
+    forest->AddEdge(vertex2, vertex1, edge_weight);
+
+    // Connect the two connected components by updating the
+    // disjoint_set structure. Always connect the connected component
+    // with the greater index to the connected component with the
+    // smaller index. This should ensure shallower trees, for quicker
+    // lookups.
+    if (root2 < root1) {
+      std::swap(root1, root2);
+    }
+
+    disjoint_set[root2] = root1;
+  }
+  return forest;
+}
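+
+// A minimal usage sketch (illustrative; "graph" below denotes such a star
+// graph): for edges (0,1), (0,2), (0,3), (0,4) with weights 1, 2, 3, 4,
+// only the two heaviest edges can be kept without exceeding degree two
+// at the central vertex 0.
+//
+//   WeightedGraph<int>* forest = Degree2MaximumSpanningForest(graph);
+//   forest->Neighbors(0);   // {3, 4}; vertices 1 and 2 end up isolated.
+//   delete forest;          // The caller owns the returned forest.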
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_GRAPH_ALGORITHMS_H_
diff --git a/internal/ceres/graph_algorithms_test.cc b/internal/ceres/graph_algorithms_test.cc
new file mode 100644
index 0000000..2aef327
--- /dev/null
+++ b/internal/ceres/graph_algorithms_test.cc
@@ -0,0 +1,248 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/graph_algorithms.h"
+
+#include <algorithm>
+#include <memory>
+#include <unordered_set>
+
+#include "ceres/graph.h"
+#include "ceres/internal/port.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+using std::vector;
+
+TEST(IndependentSetOrdering, Chain) {
+  Graph<int> graph;
+  graph.AddVertex(0);
+  graph.AddVertex(1);
+  graph.AddVertex(2);
+  graph.AddVertex(3);
+  graph.AddVertex(4);
+
+  graph.AddEdge(0, 1);
+  graph.AddEdge(1, 2);
+  graph.AddEdge(2, 3);
+  graph.AddEdge(3, 4);
+
+  // 0-1-2-3-4
+  // 0, 2, 4 should be in the independent set.
+  vector<int> ordering;
+  int independent_set_size = IndependentSetOrdering(graph, &ordering);
+
+  sort(ordering.begin(), ordering.begin() + 3);
+  sort(ordering.begin() + 3, ordering.end());
+
+  EXPECT_EQ(independent_set_size, 3);
+  EXPECT_EQ(ordering.size(), 5);
+  EXPECT_EQ(ordering[0], 0);
+  EXPECT_EQ(ordering[1], 2);
+  EXPECT_EQ(ordering[2], 4);
+  EXPECT_EQ(ordering[3], 1);
+  EXPECT_EQ(ordering[4], 3);
+}
+
+TEST(IndependentSetOrdering, Star) {
+  Graph<int> graph;
+  graph.AddVertex(0);
+  graph.AddVertex(1);
+  graph.AddVertex(2);
+  graph.AddVertex(3);
+  graph.AddVertex(4);
+
+  graph.AddEdge(0, 1);
+  graph.AddEdge(0, 2);
+  graph.AddEdge(0, 3);
+  graph.AddEdge(0, 4);
+
+  //      1
+  //      |
+  //    4-0-2
+  //      |
+  //      3
+  // 1, 2, 3, 4 should be in the independent set.
+  vector<int> ordering;
+  int independent_set_size = IndependentSetOrdering(graph, &ordering);
+  EXPECT_EQ(independent_set_size, 4);
+  EXPECT_EQ(ordering.size(), 5);
+  EXPECT_EQ(ordering[4], 0);
+  sort(ordering.begin(), ordering.begin() + 4);
+  EXPECT_EQ(ordering[0], 1);
+  EXPECT_EQ(ordering[1], 2);
+  EXPECT_EQ(ordering[2], 3);
+  EXPECT_EQ(ordering[3], 4);
+}
+
+TEST(Degree2MaximumSpanningForest, PreserveWeights) {
+  WeightedGraph<int> graph;
+  graph.AddVertex(0, 1.0);
+  graph.AddVertex(1, 2.0);
+  graph.AddEdge(0, 1, 0.5);
+  graph.AddEdge(1, 0, 0.5);
+
+  std::unique_ptr<WeightedGraph<int> > forest(
+      Degree2MaximumSpanningForest(graph));
+
+  const std::unordered_set<int>& vertices = forest->vertices();
+  EXPECT_EQ(vertices.size(), 2);
+  EXPECT_EQ(forest->VertexWeight(0), 1.0);
+  EXPECT_EQ(forest->VertexWeight(1), 2.0);
+  EXPECT_EQ(forest->Neighbors(0).size(), 1.0);
+  EXPECT_EQ(forest->EdgeWeight(0, 1), 0.5);
+}
+
+TEST(Degree2MaximumSpanningForest, StarGraph) {
+  WeightedGraph<int> graph;
+  graph.AddVertex(0);
+  graph.AddVertex(1);
+  graph.AddVertex(2);
+  graph.AddVertex(3);
+  graph.AddVertex(4);
+
+  graph.AddEdge(0, 1, 1.0);
+  graph.AddEdge(0, 2, 2.0);
+  graph.AddEdge(0, 3, 3.0);
+  graph.AddEdge(0, 4, 4.0);
+
+  std::unique_ptr<WeightedGraph<int> > forest(
+      Degree2MaximumSpanningForest(graph));
+  const std::unordered_set<int>& vertices = forest->vertices();
+  EXPECT_EQ(vertices.size(), 5);
+
+  {
+    const std::unordered_set<int>& neighbors = forest->Neighbors(0);
+    EXPECT_EQ(neighbors.size(), 2);
+    EXPECT_TRUE(neighbors.find(4) != neighbors.end());
+    EXPECT_TRUE(neighbors.find(3) != neighbors.end());
+  }
+
+  {
+    const std::unordered_set<int>& neighbors = forest->Neighbors(3);
+    EXPECT_EQ(neighbors.size(), 1);
+    EXPECT_TRUE(neighbors.find(0) != neighbors.end());
+  }
+
+  {
+    const std::unordered_set<int>& neighbors = forest->Neighbors(4);
+    EXPECT_EQ(neighbors.size(), 1);
+    EXPECT_TRUE(neighbors.find(0) != neighbors.end());
+  }
+
+  {
+    const std::unordered_set<int>& neighbors = forest->Neighbors(1);
+    EXPECT_EQ(neighbors.size(), 0);
+  }
+
+  {
+    const std::unordered_set<int>& neighbors = forest->Neighbors(2);
+    EXPECT_EQ(neighbors.size(), 0);
+  }
+}
+
+TEST(VertexTotalOrdering, TotalOrdering) {
+  Graph<int> graph;
+  graph.AddVertex(0);
+  graph.AddVertex(1);
+  graph.AddVertex(2);
+  graph.AddVertex(3);
+
+  // 0-1
+  //
+  // 2-3
+  // All four vertices have degree 1, so the ordering falls back to
+  // comparing vertex ids.
+  graph.AddEdge(0, 1);
+  graph.AddEdge(2, 3);
+  VertexTotalOrdering<int> less_than(graph);
+
+  for (int i = 0; i < 4; ++i) {
+    EXPECT_FALSE(less_than(i, i)) << "Failing vertex: " << i;
+    for (int j = 0; j < 4; ++j) {
+      if (i != j) {
+        EXPECT_TRUE(less_than(i, j) ^ less_than(j, i))
+            << "Failing vertex pair: " << i << " " << j;
+      }
+    }
+  }
+
+  for (int i = 0; i < 3; ++i) {
+    EXPECT_TRUE(less_than(i, 3));
+    EXPECT_FALSE(less_than(3, i));
+  }
+}
+
+
+TEST(StableIndependentSet, BreakTies) {
+  Graph<int> graph;
+  graph.AddVertex(0);
+  graph.AddVertex(1);
+  graph.AddVertex(2);
+  graph.AddVertex(3);
+
+  graph.AddEdge(0, 1);
+  graph.AddEdge(0, 2);
+  graph.AddEdge(0, 3);
+  graph.AddEdge(1, 2);
+  graph.AddEdge(1, 3);
+  graph.AddEdge(2, 3);
+
+  // Since this is a completely connected graph, the independent set
+  // contains exactly one vertex. StableIndependentSetOrdering
+  // guarantees that it will always be the first vertex in the
+  // ordering vector.
+  {
+    vector<int> ordering;
+    ordering.push_back(0);
+    ordering.push_back(1);
+    ordering.push_back(2);
+    ordering.push_back(3);
+    const int independent_set_size =
+        StableIndependentSetOrdering(graph, &ordering);
+    EXPECT_EQ(independent_set_size, 1);
+    EXPECT_EQ(ordering[0], 0);
+  }
+
+  {
+    vector<int> ordering;
+    ordering.push_back(1);
+    ordering.push_back(0);
+    ordering.push_back(2);
+    ordering.push_back(3);
+    const int independent_set_size =
+        StableIndependentSetOrdering(graph, &ordering);
+    EXPECT_EQ(independent_set_size, 1);
+    EXPECT_EQ(ordering[0], 1);
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/graph_test.cc b/internal/ceres/graph_test.cc
new file mode 100644
index 0000000..8f05475
--- /dev/null
+++ b/internal/ceres/graph_test.cc
@@ -0,0 +1,151 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/graph.h"
+
+#include <unordered_set>
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+TEST(Graph, EmptyGraph) {
+  Graph<int> graph;
+  EXPECT_EQ(graph.vertices().size(), 0);
+}
+
+TEST(Graph, AddVertexAndEdge) {
+  Graph<int> graph;
+  graph.AddVertex(0);
+  graph.AddVertex(1);
+  graph.AddEdge(0, 1);
+
+  const std::unordered_set<int>& vertices = graph.vertices();
+  EXPECT_EQ(vertices.size(), 2);
+  EXPECT_EQ(graph.Neighbors(0).size(), 1);
+  EXPECT_EQ(graph.Neighbors(1).size(), 1);
+}
+
+TEST(Graph, AddVertexIdempotence) {
+  Graph<int> graph;
+  graph.AddVertex(0);
+  graph.AddVertex(1);
+  graph.AddEdge(0, 1);
+
+  const std::unordered_set<int>& vertices = graph.vertices();
+
+  EXPECT_EQ(vertices.size(), 2);
+
+  // Try adding the vertex again.
+  graph.AddVertex(0);
+  EXPECT_EQ(vertices.size(), 2);
+
+  // Rest of the graph remains the same.
+  EXPECT_EQ(graph.Neighbors(0).size(), 1);
+  EXPECT_EQ(graph.Neighbors(1).size(), 1);
+}
+
+TEST(Graph, DieOnNonExistentVertex) {
+  Graph<int> graph;
+  graph.AddVertex(0);
+  graph.AddVertex(1);
+  graph.AddEdge(0, 1);
+
+  EXPECT_DEATH_IF_SUPPORTED(graph.Neighbors(2), "key not found");
+}
+
+TEST(WeightedGraph, EmptyGraph) {
+  WeightedGraph<int> graph;
+  EXPECT_EQ(graph.vertices().size(), 0);
+}
+
+TEST(WeightedGraph, AddVertexAndEdge) {
+  WeightedGraph<int> graph;
+  graph.AddVertex(0, 1.0);
+  graph.AddVertex(1, 2.0);
+  graph.AddEdge(0, 1, 0.5);
+
+  const std::unordered_set<int>& vertices = graph.vertices();
+  EXPECT_EQ(vertices.size(), 2);
+  EXPECT_EQ(graph.VertexWeight(0), 1.0);
+  EXPECT_EQ(graph.VertexWeight(1), 2.0);
+  EXPECT_EQ(graph.Neighbors(0).size(), 1);
+  EXPECT_EQ(graph.Neighbors(1).size(), 1);
+  EXPECT_EQ(graph.EdgeWeight(0, 1), 0.5);
+  EXPECT_EQ(graph.EdgeWeight(1, 0), 0.5);
+}
+
+TEST(WeightedGraph, AddVertexIdempotence) {
+  WeightedGraph<int> graph;
+  graph.AddVertex(0, 1.0);
+  graph.AddVertex(1, 2.0);
+  graph.AddEdge(0, 1, 0.5);
+
+  const std::unordered_set<int>& vertices = graph.vertices();
+
+  EXPECT_EQ(vertices.size(), 2);
+
+  // Try adding the vertex again with a new weight.
+  graph.AddVertex(0, 3.0);
+  EXPECT_EQ(vertices.size(), 2);
+
+  // The vertex weight is reset.
+  EXPECT_EQ(graph.VertexWeight(0), 3.0);
+
+  // Rest of the graph remains the same.
+  EXPECT_EQ(graph.VertexWeight(1), 2.0);
+  EXPECT_EQ(graph.Neighbors(0).size(), 1);
+  EXPECT_EQ(graph.Neighbors(1).size(), 1);
+  EXPECT_EQ(graph.EdgeWeight(0, 1), 0.5);
+  EXPECT_EQ(graph.EdgeWeight(1, 0), 0.5);
+}
+
+TEST(WeightedGraph, DieOnNonExistentVertex) {
+  WeightedGraph<int> graph;
+  graph.AddVertex(0, 1.0);
+  graph.AddVertex(1, 2.0);
+  graph.AddEdge(0, 1, 0.5);
+
+  EXPECT_DEATH_IF_SUPPORTED(graph.VertexWeight(2), "key not found");
+  EXPECT_DEATH_IF_SUPPORTED(graph.Neighbors(2), "key not found");
+}
+
+TEST(WeightedGraph, NonExistentEdge) {
+  WeightedGraph<int> graph;
+  graph.AddVertex(0, 1.0);
+  graph.AddVertex(1, 2.0);
+  graph.AddEdge(0, 1, 0.5);
+
+  // Default value for non-existent edges is 0.
+  EXPECT_EQ(graph.EdgeWeight(2, 3), 0);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/gtest/gtest.h b/internal/ceres/gtest/gtest.h
new file mode 100644
index 0000000..aea1f51
--- /dev/null
+++ b/internal/ceres/gtest/gtest.h
@@ -0,0 +1,21202 @@
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file defines the public API for Google Test.  It should be
+// included by any test program that uses Google Test.
+//
+// IMPORTANT NOTE: Due to limitations of the C++ language, we have to
+// leave some internal implementation details in this header file.
+// They are clearly marked by comments like this:
+//
+//   // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+//
+// Such code is NOT meant to be used by a user directly, and is subject
+// to CHANGE WITHOUT NOTICE.  Therefore DO NOT DEPEND ON IT in a user
+// program!
+//
+// Acknowledgment: Google Test borrowed the idea of automatic test
+// registration from Barthelemy Dagenais' (barthelemy@prologique.com)
+// easyUnit framework.
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
+#define GTEST_INCLUDE_GTEST_GTEST_H_
+
+#include <limits>
+#include <ostream>
+#include <vector>
+
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: wan@google.com (Zhanyong Wan), eefacm@gmail.com (Sean Mcafee)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file declares functions and macros used internally by
+// Google Test.  They are subject to change without notice.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_
+
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: wan@google.com (Zhanyong Wan)
+//
+// Low-level types and utilities for porting Google Test to various
+// platforms.  All macros ending with _ and symbols defined in an
+// internal namespace are subject to change without notice.  Code
+// outside Google Test MUST NOT USE THEM DIRECTLY.  Macros that don't
+// end with _ are part of Google Test's public API and can be used by
+// code outside Google Test.
+//
+// This file is fundamental to Google Test.  All other Google Test source
+// files are expected to #include this.  Therefore, it cannot #include
+// any other Google Test header.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_
+
+// Environment-describing macros
+// -----------------------------
+//
+// Google Test can be used in many different environments.  Macros in
+// this section tell Google Test what kind of environment it is being
+// used in, such that Google Test can provide environment-specific
+// features and implementations.
+//
+// Google Test tries to automatically detect the properties of its
+// environment, so users usually don't need to worry about these
+// macros.  However, the automatic detection is not perfect.
+// Sometimes it's necessary for a user to define some of the following
+// macros in the build script to override Google Test's decisions.
+//
+// If the user doesn't define a macro in the list, Google Test will
+// provide a default definition.  After this header is #included, all
+// macros in this list will be defined to either 1 or 0.
+//
+// Notes to maintainers:
+//   - Each macro here is a user-tweakable knob; do not grow the list
+//     lightly.
+//   - Use #if to key off these macros.  Don't use #ifdef or "#if
+//     defined(...)", which will not work as these macros are ALWAYS
+//     defined.
+//
+//   GTEST_HAS_CLONE          - Define it to 1/0 to indicate that clone(2)
+//                              is/isn't available.
+//   GTEST_HAS_EXCEPTIONS     - Define it to 1/0 to indicate that exceptions
+//                              are enabled.
+//   GTEST_HAS_GLOBAL_STRING  - Define it to 1/0 to indicate that ::string
+//                              is/isn't available (some systems define
+//                              ::string, which is different to std::string).
+//   GTEST_HAS_GLOBAL_WSTRING - Define it to 1/0 to indicate that ::wstring
+//                              is/isn't available (some systems define
+//                              ::wstring, which is different to std::wstring).
+//   GTEST_HAS_POSIX_RE       - Define it to 1/0 to indicate that POSIX regular
+//                              expressions are/aren't available.
+//   GTEST_HAS_PTHREAD        - Define it to 1/0 to indicate that <pthread.h>
+//                              is/isn't available.
+//   GTEST_HAS_RTTI           - Define it to 1/0 to indicate that RTTI is/isn't
+//                              enabled.
+//   GTEST_HAS_STD_WSTRING    - Define it to 1/0 to indicate that
+//                              std::wstring does/doesn't work (Google Test can
+//                              be used where std::wstring is unavailable).
+//   GTEST_HAS_TR1_TUPLE      - Define it to 1/0 to indicate tr1::tuple
+//                              is/isn't available.
+//   GTEST_HAS_SEH            - Define it to 1/0 to indicate whether the
+//                              compiler supports Microsoft's "Structured
+//                              Exception Handling".
+//   GTEST_HAS_STREAM_REDIRECTION
+//                            - Define it to 1/0 to indicate whether the
+//                              platform supports I/O stream redirection using
+//                              dup() and dup2().
+//   GTEST_USE_OWN_TR1_TUPLE  - Define it to 1/0 to indicate whether Google
+//                              Test's own tr1 tuple implementation should be
+//                              used.  Unused when the user sets
+//                              GTEST_HAS_TR1_TUPLE to 0.
+//   GTEST_LANG_CXX11         - Define it to 1/0 to indicate that Google Test
+//                              is building in C++11/C++98 mode.
+//   GTEST_LINKED_AS_SHARED_LIBRARY
+//                            - Define to 1 when compiling tests that use
+//                              Google Test as a shared library (known as
+//                              DLL on Windows).
+//   GTEST_CREATE_SHARED_LIBRARY
+//                            - Define to 1 when compiling Google Test itself
+//                              as a shared library.
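+//
+// For example, a build that wants to force-disable exception support,
+// regardless of what the automatic detection would conclude, could set
+// the corresponding macro on the compiler command line (illustrative
+// sketch only):
+//
+//   g++ -DGTEST_HAS_EXCEPTIONS=0 -c my_test.cc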
+
+// Platform-indicating macros
+// --------------------------
+//
+// Macros indicating the platform on which Google Test is being used
+// (a macro is defined to 1 if compiled on the given platform;
+// otherwise UNDEFINED -- it's never defined to 0.).  Google Test
+// defines these macros automatically.  Code outside Google Test MUST
+// NOT define them.
+//
+//   GTEST_OS_AIX      - IBM AIX
+//   GTEST_OS_CYGWIN   - Cygwin
+//   GTEST_OS_FREEBSD  - FreeBSD
+//   GTEST_OS_HPUX     - HP-UX
+//   GTEST_OS_LINUX    - Linux
+//     GTEST_OS_LINUX_ANDROID - Google Android
+//   GTEST_OS_MAC      - Mac OS X
+//     GTEST_OS_IOS    - iOS
+//   GTEST_OS_NACL     - Google Native Client (NaCl)
+//   GTEST_OS_OPENBSD  - OpenBSD
+//   GTEST_OS_QNX      - QNX
+//   GTEST_OS_SOLARIS  - Sun Solaris
+//   GTEST_OS_SYMBIAN  - Symbian
+//   GTEST_OS_WINDOWS  - Windows (Desktop, MinGW, or Mobile)
+//     GTEST_OS_WINDOWS_DESKTOP  - Windows Desktop
+//     GTEST_OS_WINDOWS_MINGW    - MinGW
+//     GTEST_OS_WINDOWS_MOBILE   - Windows Mobile
+//     GTEST_OS_WINDOWS_PHONE    - Windows Phone
+//     GTEST_OS_WINDOWS_RT       - Windows Store App/WinRT
+//   GTEST_OS_ZOS      - z/OS
+//
+// Among the platforms, Cygwin, Linux, Mac OS X, and Windows have the
+// most stable support.  Since core members of the Google Test project
+// don't have access to other platforms, support for them may be less
+// stable.  If you notice any problems on your platform, please notify
+// googletestframework@googlegroups.com (patches for fixing them are
+// even more welcome!).
+//
+// It is possible that none of the GTEST_OS_* macros are defined.
+
+// Feature-indicating macros
+// -------------------------
+//
+// Macros indicating which Google Test features are available (a macro
+// is defined to 1 if the corresponding feature is supported;
+// otherwise UNDEFINED -- it's never defined to 0.).  Google Test
+// defines these macros automatically.  Code outside Google Test MUST
+// NOT define them.
+//
+// These macros are public so that portable tests can be written.
+// Such tests typically surround code using a feature with an #if
+// which controls that code.  For example:
+//
+// #if GTEST_HAS_DEATH_TEST
+//   EXPECT_DEATH(DoSomethingDeadly());
+// #endif
+//
+//   GTEST_HAS_COMBINE      - the Combine() function (for value-parameterized
+//                            tests)
+//   GTEST_HAS_DEATH_TEST   - death tests
+//   GTEST_HAS_PARAM_TEST   - value-parameterized tests
+//   GTEST_HAS_TYPED_TEST   - typed tests
+//   GTEST_HAS_TYPED_TEST_P - type-parameterized tests
+//   GTEST_IS_THREADSAFE    - Google Test is thread-safe.
+//   GTEST_USES_POSIX_RE    - enhanced POSIX regex is used. Do not confuse with
+//                            GTEST_HAS_POSIX_RE (see above) which users can
+//                            define themselves.
+//   GTEST_USES_SIMPLE_RE   - our own simple regex is used;
+//                            the above two are mutually exclusive.
+//   GTEST_CAN_COMPARE_NULL - accepts untyped NULL in EXPECT_EQ().
+
+// Misc public macros
+// ------------------
+//
+//   GTEST_FLAG(flag_name)  - references the variable corresponding to
+//                            the given Google Test flag.
+
+// Internal utilities
+// ------------------
+//
+// The following macros and utilities are for Google Test's INTERNAL
+// use only.  Code outside Google Test MUST NOT USE THEM DIRECTLY.
+//
+// Macros for basic C++ coding:
+//   GTEST_AMBIGUOUS_ELSE_BLOCKER_ - for disabling a gcc warning.
+//   GTEST_ATTRIBUTE_UNUSED_  - declares that a class' instances or a
+//                              variable don't have to be used.
+//   GTEST_DISALLOW_ASSIGN_   - disables operator=.
+//   GTEST_DISALLOW_COPY_AND_ASSIGN_ - disables copy ctor and operator=.
+//   GTEST_MUST_USE_RESULT_   - declares that a function's result must be used.
+//   GTEST_INTENTIONAL_CONST_COND_PUSH_ - start code section where MSVC C4127 is
+//                                        suppressed (constant conditional).
+//   GTEST_INTENTIONAL_CONST_COND_POP_  - finish code section where MSVC C4127
+//                                        is suppressed.
+//
+// C++11 feature wrappers:
+//
+//   testing::internal::move  - portability wrapper for std::move.
+//
+// Synchronization:
+//   Mutex, MutexLock, ThreadLocal, GetThreadCount()
+//                            - synchronization primitives.
+//
+// Template meta programming:
+//   is_pointer     - as in TR1; needed on Symbian and IBM XL C/C++ only.
+//   IteratorTraits - partial implementation of std::iterator_traits, which
+//                    is not available in libCstd when compiled with Sun C++.
+//
+// Smart pointers:
+//   scoped_ptr     - as in TR2.
+//
+// Regular expressions:
+//   RE             - a simple regular expression class using the POSIX
+//                    Extended Regular Expression syntax on UNIX-like
+//                    platforms, or a reduced regular expression syntax on
+//                    other platforms, including Windows.
+//
+// Logging:
+//   GTEST_LOG_()   - logs messages at the specified severity level.
+//   LogToStderr()  - directs all log messages to stderr.
+//   FlushInfoLog() - flushes informational log messages.
+//
+// Stdout and stderr capturing:
+//   CaptureStdout()     - starts capturing stdout.
+//   GetCapturedStdout() - stops capturing stdout and returns the captured
+//                         string.
+//   CaptureStderr()     - starts capturing stderr.
+//   GetCapturedStderr() - stops capturing stderr and returns the captured
+//                         string.
+//
+// Integer types:
+//   TypeWithSize   - maps an integer to an int type.
+//   Int32, UInt32, Int64, UInt64, TimeInMillis
+//                  - integers of known sizes.
+//   BiggestInt     - the biggest signed integer type.
+//
+// Command-line utilities:
+//   GTEST_DECLARE_*()  - declares a flag.
+//   GTEST_DEFINE_*()   - defines a flag.
+//   GetInjectableArgvs() - returns the command line as a vector of strings.
+//
+// Environment variable utilities:
+//   GetEnv()             - gets the value of an environment variable.
+//   BoolFromGTestEnv()   - parses a bool environment variable.
+//   Int32FromGTestEnv()  - parses an Int32 environment variable.
+//   StringFromGTestEnv() - parses a string environment variable.
+
+#include <ctype.h>   // for isspace, etc
+#include <stddef.h>  // for ptrdiff_t
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#ifndef _WIN32_WCE
+# include <sys/types.h>
+# include <sys/stat.h>
+#endif  // !_WIN32_WCE
+
+#if defined __APPLE__
+# include <AvailabilityMacros.h>
+# include <TargetConditionals.h>
+#endif
+
+#include <algorithm>  // NOLINT
+#include <iostream>  // NOLINT
+#include <sstream>  // NOLINT
+#include <string>  // NOLINT
+#include <utility>
+#include <vector>  // NOLINT
+
+// Copyright 2015, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file defines the GTEST_OS_* macro.
+// It is separate from gtest-port.h so that custom/gtest-port.h can include it.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_ARCH_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_ARCH_H_
+
+// Determines the platform on which Google Test is compiled.
+#ifdef __CYGWIN__
+# define GTEST_OS_CYGWIN 1
+#elif defined __SYMBIAN32__
+# define GTEST_OS_SYMBIAN 1
+#elif defined _WIN32
+# define GTEST_OS_WINDOWS 1
+# ifdef _WIN32_WCE
+#  define GTEST_OS_WINDOWS_MOBILE 1
+# elif defined(__MINGW__) || defined(__MINGW32__)
+#  define GTEST_OS_WINDOWS_MINGW 1
+# elif defined(WINAPI_FAMILY)
+#  include <winapifamily.h>
+#  if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
+#   define GTEST_OS_WINDOWS_DESKTOP 1
+#  elif WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_PHONE_APP)
+#   define GTEST_OS_WINDOWS_PHONE 1
+#  elif WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP)
+#   define GTEST_OS_WINDOWS_RT 1
+#  else
+    // WINAPI_FAMILY defined but no known partition matched.
+    // Default to desktop.
+#   define GTEST_OS_WINDOWS_DESKTOP 1
+#  endif
+# else
+#  define GTEST_OS_WINDOWS_DESKTOP 1
+# endif  // _WIN32_WCE
+#elif defined __APPLE__
+# define GTEST_OS_MAC 1
+# if TARGET_OS_IPHONE
+#  define GTEST_OS_IOS 1
+# endif
+#elif defined __FreeBSD__
+# define GTEST_OS_FREEBSD 1
+#elif defined __linux__
+# define GTEST_OS_LINUX 1
+# if defined __ANDROID__
+#  define GTEST_OS_LINUX_ANDROID 1
+# endif
+#elif defined __MVS__
+# define GTEST_OS_ZOS 1
+#elif defined(__sun) && defined(__SVR4)
+# define GTEST_OS_SOLARIS 1
+#elif defined(_AIX)
+# define GTEST_OS_AIX 1
+#elif defined(__hpux)
+# define GTEST_OS_HPUX 1
+#elif defined __native_client__
+# define GTEST_OS_NACL 1
+#elif defined __OpenBSD__
+# define GTEST_OS_OPENBSD 1
+#elif defined __QNX__
+# define GTEST_OS_QNX 1
+#endif  // __CYGWIN__
+
+#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_ARCH_H_
+// Copyright 2015, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Injection point for custom user configurations.
+// The following macros can be defined:
+//
+//   Flag related macros:
+//     GTEST_FLAG(flag_name)
+//     GTEST_USE_OWN_FLAGFILE_FLAG_  - Define to 0 when the system provides its
+//                                     own flagfile flag parsing.
+//     GTEST_DECLARE_bool_(name)
+//     GTEST_DECLARE_int32_(name)
+//     GTEST_DECLARE_string_(name)
+//     GTEST_DEFINE_bool_(name, default_val, doc)
+//     GTEST_DEFINE_int32_(name, default_val, doc)
+//     GTEST_DEFINE_string_(name, default_val, doc)
+//
+//   Test filtering:
+//     GTEST_TEST_FILTER_ENV_VAR_ - The name of an environment variable that
+//                                  will be used if --GTEST_FLAG(test_filter)
+//                                  is not provided.
+//
+//   Logging:
+//     GTEST_LOG_(severity)
+//     GTEST_CHECK_(condition)
+//     Functions LogToStderr() and FlushInfoLog() have to be provided too.
+//
+//   Threading:
+//     GTEST_HAS_NOTIFICATION_ - Enabled if Notification is already provided.
+//     GTEST_HAS_MUTEX_AND_THREAD_LOCAL_ - Enabled if Mutex and ThreadLocal are
+//                                         already provided.
+//     Must also provide GTEST_DECLARE_STATIC_MUTEX_(mutex) and
+//     GTEST_DEFINE_STATIC_MUTEX_(mutex)
+//
+//     GTEST_EXCLUSIVE_LOCK_REQUIRED_(locks)
+//     GTEST_LOCK_EXCLUDED_(locks)
+//
+// ** Custom implementation starts here **
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_PORT_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_PORT_H_
+
+#endif  // GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_PORT_H_
+
+#if !defined(GTEST_DEV_EMAIL_)
+# define GTEST_DEV_EMAIL_ "googletestframework@@googlegroups.com"
+# define GTEST_FLAG_PREFIX_ "gtest_"
+# define GTEST_FLAG_PREFIX_DASH_ "gtest-"
+# define GTEST_FLAG_PREFIX_UPPER_ "GTEST_"
+# define GTEST_NAME_ "Google Test"
+# define GTEST_PROJECT_URL_ "https://github.com/google/googletest/"
+#endif  // !defined(GTEST_DEV_EMAIL_)
+
+#if !defined(GTEST_INIT_GOOGLE_TEST_NAME_)
+# define GTEST_INIT_GOOGLE_TEST_NAME_ "testing::InitGoogleTest"
+#endif  // !defined(GTEST_INIT_GOOGLE_TEST_NAME_)
+
+// Determines the version of gcc that is used to compile this.
+#ifdef __GNUC__
+// 40302 means version 4.3.2.
+# define GTEST_GCC_VER_ \
+    (__GNUC__*10000 + __GNUC_MINOR__*100 + __GNUC_PATCHLEVEL__)
+#endif  // __GNUC__
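+
+// For example (illustrative), code can test for GCC >= 4.6 with
+//   #if defined(__GNUC__) && (GTEST_GCC_VER_ >= 40600)
+// since version 4.6.0 maps to 4*10000 + 6*100 + 0 = 40600.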
+
+// Macros for disabling Microsoft Visual C++ warnings.
+//
+//   GTEST_DISABLE_MSC_WARNINGS_PUSH_(4800 4385)
+//   /* code that triggers warnings C4800 and C4385 */
+//   GTEST_DISABLE_MSC_WARNINGS_POP_()
+#if _MSC_VER >= 1500
+# define GTEST_DISABLE_MSC_WARNINGS_PUSH_(warnings) \
+    __pragma(warning(push))                        \
+    __pragma(warning(disable: warnings))
+# define GTEST_DISABLE_MSC_WARNINGS_POP_()          \
+    __pragma(warning(pop))
+#else
+// Older versions of MSVC don't have __pragma.
+# define GTEST_DISABLE_MSC_WARNINGS_PUSH_(warnings)
+# define GTEST_DISABLE_MSC_WARNINGS_POP_()
+#endif
+
+#ifndef GTEST_LANG_CXX11
+// gcc and clang define __GXX_EXPERIMENTAL_CXX0X__ when
+// -std={c,gnu}++{0x,11} is passed.  The C++11 standard specifies a
+// value for __cplusplus, and recent versions of clang, gcc, and
+// probably other compilers set that too in C++11 mode.
+# if __GXX_EXPERIMENTAL_CXX0X__ || __cplusplus >= 201103L
+// Compiling in at least C++11 mode.
+#  define GTEST_LANG_CXX11 1
+# else
+#  define GTEST_LANG_CXX11 0
+# endif
+#endif
+
+// Distinct from C++11 language support, some environments don't provide
+// proper C++11 library support. Notably, it's possible to build in
+// C++11 mode when targeting Mac OS X 10.6, which has an old libstdc++
+// with no C++11 support.
+//
+// libstdc++ has sufficient C++11 support as of GCC 4.6.0, __GLIBCXX__
+// 20110325, but maintenance releases in the 4.4 and 4.5 series followed
+// this date, so check for those versions by their date stamps.
+// https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html#abi.versioning
+#if GTEST_LANG_CXX11 && \
+    (!defined(__GLIBCXX__) || ( \
+        __GLIBCXX__ >= 20110325ul &&  /* GCC >= 4.6.0 */ \
+        /* Blacklist of patch releases of older branches: */ \
+        __GLIBCXX__ != 20110416ul &&  /* GCC 4.4.6 */ \
+        __GLIBCXX__ != 20120313ul &&  /* GCC 4.4.7 */ \
+        __GLIBCXX__ != 20110428ul &&  /* GCC 4.5.3 */ \
+        __GLIBCXX__ != 20120702ul))   /* GCC 4.5.4 */
+# define GTEST_STDLIB_CXX11 1
+#endif
+
+// Only use C++11 library features if the library provides them.
+#if GTEST_STDLIB_CXX11
+# define GTEST_HAS_STD_BEGIN_AND_END_ 1
+# define GTEST_HAS_STD_FORWARD_LIST_ 1
+# define GTEST_HAS_STD_FUNCTION_ 1
+# define GTEST_HAS_STD_INITIALIZER_LIST_ 1
+# define GTEST_HAS_STD_MOVE_ 1
+# define GTEST_HAS_STD_SHARED_PTR_ 1
+# define GTEST_HAS_STD_TYPE_TRAITS_ 1
+# define GTEST_HAS_STD_UNIQUE_PTR_ 1
+#endif
+
+// C++11 specifies that <tuple> provides std::tuple.
+// Some platforms still might not have it, however.
+#if GTEST_LANG_CXX11
+# define GTEST_HAS_STD_TUPLE_ 1
+# if defined(__clang__)
+// Inspired by http://clang.llvm.org/docs/LanguageExtensions.html#__has_include
+#  if defined(__has_include) && !__has_include(<tuple>)
+#   undef GTEST_HAS_STD_TUPLE_
+#  endif
+# elif defined(_MSC_VER)
+// Inspired by boost/config/stdlib/dinkumware.hpp
+#  if defined(_CPPLIB_VER) && _CPPLIB_VER < 520
+#   undef GTEST_HAS_STD_TUPLE_
+#  endif
+# elif defined(__GLIBCXX__)
+// Inspired by boost/config/stdlib/libstdcpp3.hpp,
+// http://gcc.gnu.org/gcc-4.2/changes.html and
+// http://gcc.gnu.org/onlinedocs/libstdc++/manual/bk01pt01ch01.html#manual.intro.status.standard.200x
+#  if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 2)
+#   undef GTEST_HAS_STD_TUPLE_
+#  endif
+# endif
+#endif
+
+// Brings in definitions for functions used in the testing::internal::posix
+// namespace (read, write, close, chdir, isatty, stat). We do not currently
+// use them on Windows Mobile.
+#if GTEST_OS_WINDOWS
+# if !GTEST_OS_WINDOWS_MOBILE
+#  include <direct.h>
+#  include <io.h>
+# endif
+// In order to avoid having to include <windows.h>, use a forward declaration.
+#if GTEST_OS_WINDOWS_MINGW && !defined(__MINGW64_VERSION_MAJOR)
+// MinGW defines _CRITICAL_SECTION and _RTL_CRITICAL_SECTION as two
+// separate (equivalent) structs, instead of using a typedef.
+typedef struct _CRITICAL_SECTION GTEST_CRITICAL_SECTION;
+#else
+// Assume CRITICAL_SECTION is a typedef of _RTL_CRITICAL_SECTION.
+// This assumption is verified by
+// WindowsTypesTest.CRITICAL_SECTIONIs_RTL_CRITICAL_SECTION.
+typedef struct _RTL_CRITICAL_SECTION GTEST_CRITICAL_SECTION;
+#endif
+#else
+// This assumes that non-Windows OSes provide unistd.h. For OSes where this
+// is not the case, we need to include headers that provide the functions
+// mentioned above.
+# include <unistd.h>
+# include <strings.h>
+#endif  // GTEST_OS_WINDOWS
+
+#if GTEST_OS_LINUX_ANDROID
+// Used to define __ANDROID_API__ matching the target NDK API level.
+#  include <android/api-level.h>  // NOLINT
+#endif
+
+// Determines whether Google Test can use POSIX regular expressions.
+#ifndef GTEST_HAS_POSIX_RE
+# if GTEST_OS_LINUX_ANDROID
+// On Android, <regex.h> is only available starting with Gingerbread.
+#  define GTEST_HAS_POSIX_RE (__ANDROID_API__ >= 9)
+# else
+#  define GTEST_HAS_POSIX_RE (!GTEST_OS_WINDOWS)
+# endif
+#endif
+
+#if GTEST_USES_PCRE
+// The appropriate headers have already been included.
+
+#elif GTEST_HAS_POSIX_RE
+
+// On some platforms, <regex.h> needs someone to define size_t, and
+// won't compile otherwise.  We can #include it here as we already
+// included <stdlib.h>, which is guaranteed to define size_t through
+// <stddef.h>.
+# include <regex.h>  // NOLINT
+
+# define GTEST_USES_POSIX_RE 1
+
+#elif GTEST_OS_WINDOWS
+
+// <regex.h> is not available on Windows.  Use our own simple regex
+// implementation instead.
+# define GTEST_USES_SIMPLE_RE 1
+
+#else
+
+// <regex.h> may not be available on this platform.  Use our own
+// simple regex implementation instead.
+# define GTEST_USES_SIMPLE_RE 1
+
+#endif  // GTEST_USES_PCRE
+
+#ifndef GTEST_HAS_EXCEPTIONS
+// The user didn't tell us whether exceptions are enabled, so we need
+// to figure it out.
+# if defined(_MSC_VER) || defined(__BORLANDC__)
+// MSVC's and C++Builder's implementations of the STL use the _HAS_EXCEPTIONS
+// macro to enable exceptions, so we'll do the same.
+// Assumes that exceptions are enabled by default.
+#  ifndef _HAS_EXCEPTIONS
+#   define _HAS_EXCEPTIONS 1
+#  endif  // _HAS_EXCEPTIONS
+#  define GTEST_HAS_EXCEPTIONS _HAS_EXCEPTIONS
+# elif defined(__clang__)
+// Before clang r220714, clang defines __EXCEPTIONS iff exceptions are
+// enabled; after that, it defines __EXCEPTIONS iff cleanups are enabled.
+// In Obj-C++ files, ObjC exceptions can require cleanups even when C++
+// exceptions are disabled. clang has __has_feature(cxx_exceptions), which
+// checks for C++ exceptions starting at clang r206352 but checked for
+// cleanups prior to that. To reliably check for C++ exception availability
+// with clang, check for __EXCEPTIONS && __has_feature(cxx_exceptions).
+#  define GTEST_HAS_EXCEPTIONS (__EXCEPTIONS && __has_feature(cxx_exceptions))
+# elif defined(__GNUC__) && __EXCEPTIONS
+// gcc defines __EXCEPTIONS to 1 iff exceptions are enabled.
+#  define GTEST_HAS_EXCEPTIONS 1
+# elif defined(__SUNPRO_CC)
+// Sun Pro CC supports exceptions.  However, there is no compile-time way of
+// detecting whether they are enabled or not.  Therefore, we assume that
+// they are enabled unless the user tells us otherwise.
+#  define GTEST_HAS_EXCEPTIONS 1
+# elif defined(__IBMCPP__) && __EXCEPTIONS
+// xlC defines __EXCEPTIONS to 1 iff exceptions are enabled.
+#  define GTEST_HAS_EXCEPTIONS 1
+# elif defined(__HP_aCC)
+// Exception handling is in effect by default in the HP aCC compiler. It has
+// to be turned off with the +noeh compiler option if desired.
+#  define GTEST_HAS_EXCEPTIONS 1
+# else
+// For other compilers, we assume exceptions are disabled to be
+// conservative.
+#  define GTEST_HAS_EXCEPTIONS 0
+# endif  // defined(_MSC_VER) || defined(__BORLANDC__)
+#endif  // GTEST_HAS_EXCEPTIONS
+
+#if !defined(GTEST_HAS_STD_STRING)
+// Even though we don't use this macro any longer, we keep it in case
+// some clients still depend on it.
+# define GTEST_HAS_STD_STRING 1
+#elif !GTEST_HAS_STD_STRING
+// The user told us that ::std::string isn't available.
+# error "Google Test cannot be used where ::std::string isn't available."
+#endif  // !defined(GTEST_HAS_STD_STRING)
+
+#ifndef GTEST_HAS_GLOBAL_STRING
+// The user didn't tell us whether ::string is available, so we need
+// to figure it out.
+
+# define GTEST_HAS_GLOBAL_STRING 0
+
+#endif  // GTEST_HAS_GLOBAL_STRING
+
+#ifndef GTEST_HAS_STD_WSTRING
+// The user didn't tell us whether ::std::wstring is available, so we need
+// to figure it out.
+// TODO(wan@google.com): use autoconf to detect whether ::std::wstring
+//   is available.
+
+// Cygwin 1.7 and below doesn't support ::std::wstring.
+// Solaris' libc++ doesn't support it either.  Android has
+// no support for it, at least as recently as Froyo (2.2).
+# define GTEST_HAS_STD_WSTRING \
+    (!(GTEST_OS_LINUX_ANDROID || GTEST_OS_CYGWIN || GTEST_OS_SOLARIS))
+
+#endif  // GTEST_HAS_STD_WSTRING
+
+#ifndef GTEST_HAS_GLOBAL_WSTRING
+// The user didn't tell us whether ::wstring is available, so we need
+// to figure it out.
+# define GTEST_HAS_GLOBAL_WSTRING \
+    (GTEST_HAS_STD_WSTRING && GTEST_HAS_GLOBAL_STRING)
+#endif  // GTEST_HAS_GLOBAL_WSTRING
+
+// Determines whether RTTI is available.
+#ifndef GTEST_HAS_RTTI
+// The user didn't tell us whether RTTI is enabled, so we need to
+// figure it out.
+
+# ifdef _MSC_VER
+
+#  ifdef _CPPRTTI  // MSVC defines this macro iff RTTI is enabled.
+#   define GTEST_HAS_RTTI 1
+#  else
+#   define GTEST_HAS_RTTI 0
+#  endif
+
+// Starting with version 4.3.2, gcc defines __GXX_RTTI iff RTTI is enabled.
+# elif defined(__GNUC__) && (GTEST_GCC_VER_ >= 40302)
+
+#  ifdef __GXX_RTTI
+// When building against STLport with the Android NDK and with
+// -frtti -fno-exceptions, the build fails at link time with undefined
+// references to __cxa_bad_typeid. Not sure whether this is an STL or a
+// toolchain bug, so disable RTTI when this combination is detected.
+#   if GTEST_OS_LINUX_ANDROID && defined(_STLPORT_MAJOR) && \
+       !defined(__EXCEPTIONS)
+#    define GTEST_HAS_RTTI 0
+#   else
+#    define GTEST_HAS_RTTI 1
+#   endif  // GTEST_OS_LINUX_ANDROID && _STLPORT_MAJOR && !__EXCEPTIONS
+#  else
+#   define GTEST_HAS_RTTI 0
+#  endif  // __GXX_RTTI
+
+// Clang defines __GXX_RTTI starting with version 3.0, but its manual
+// recommends using __has_feature instead. __has_feature(cxx_rtti) is
+// supported since 2.7, the first version with C++ support.
+# elif defined(__clang__)
+
+#  define GTEST_HAS_RTTI __has_feature(cxx_rtti)
+
+// Starting with version 9.0 IBM Visual Age defines __RTTI_ALL__ to 1 if
+// both the typeid and dynamic_cast features are present.
+# elif defined(__IBMCPP__) && (__IBMCPP__ >= 900)
+
+#  ifdef __RTTI_ALL__
+#   define GTEST_HAS_RTTI 1
+#  else
+#   define GTEST_HAS_RTTI 0
+#  endif
+
+# else
+
+// For all other compilers, we assume RTTI is enabled.
+#  define GTEST_HAS_RTTI 1
+
+# endif  // _MSC_VER
+
+#endif  // GTEST_HAS_RTTI
+
+// It's this header's responsibility to #include <typeinfo> when RTTI
+// is enabled.
+#if GTEST_HAS_RTTI
+# include <typeinfo>
+#endif
+
+// Determines whether Google Test can use the pthreads library.
+#ifndef GTEST_HAS_PTHREAD
+// The user didn't tell us explicitly, so we make reasonable assumptions about
+// which platforms have pthreads support.
+//
+// To disable threading support in Google Test, add -DGTEST_HAS_PTHREAD=0
+// to your compiler flags.
+# define GTEST_HAS_PTHREAD (GTEST_OS_LINUX || GTEST_OS_MAC || GTEST_OS_HPUX \
+    || GTEST_OS_QNX || GTEST_OS_FREEBSD || GTEST_OS_NACL)
+#endif  // GTEST_HAS_PTHREAD
+
+#if GTEST_HAS_PTHREAD
+// gtest-port.h guarantees to #include <pthread.h> when GTEST_HAS_PTHREAD is
+// true.
+# include <pthread.h>  // NOLINT
+
+// For timespec and nanosleep, used below.
+# include <time.h>  // NOLINT
+#endif
+
+// Determines if hash_map/hash_set are available.
+// Only used for testing against those containers.
+#if !defined(GTEST_HAS_HASH_MAP_)
+# if _MSC_VER
+#  define GTEST_HAS_HASH_MAP_ 1  // Indicates that hash_map is available.
+#  define GTEST_HAS_HASH_SET_ 1  // Indicates that hash_set is available.
+# endif  // _MSC_VER
+#endif  // !defined(GTEST_HAS_HASH_MAP_)
+
+// Determines whether Google Test can use tr1/tuple.  You can define
+// this macro to 0 to prevent Google Test from using tuple (any
+// feature depending on tuple will be disabled in this mode).
+#ifndef GTEST_HAS_TR1_TUPLE
+# if GTEST_OS_LINUX_ANDROID && defined(_STLPORT_MAJOR)
+// STLport, provided with the Android NDK, has neither <tr1/tuple> nor <tuple>.
+#  define GTEST_HAS_TR1_TUPLE 0
+# else
+// The user didn't tell us not to do it, so we assume it's OK.
+#  define GTEST_HAS_TR1_TUPLE 1
+# endif
+#endif  // GTEST_HAS_TR1_TUPLE
+
+// Determines whether Google Test's own tr1 tuple implementation
+// should be used.
+#ifndef GTEST_USE_OWN_TR1_TUPLE
+// The user didn't tell us, so we need to figure it out.
+
+// We use our own TR1 tuple if we aren't sure the user has an
+// implementation of it already.  At this time, libstdc++ 4.0.0+ and
+// MSVC 2010 are the only mainstream standard libraries that come
+// with a TR1 tuple implementation.  NVIDIA's CUDA NVCC compiler
+// pretends to be GCC by defining __GNUC__ and friends, but cannot
+// compile GCC's tuple implementation.  MSVC 2008 (9.0) provides TR1
+// tuple in a 323 MB Feature Pack download, which we cannot assume the
+// user has.  QNX's QCC compiler is a modified GCC but it doesn't
+// support TR1 tuple.  libc++ only provides std::tuple, in C++11 mode,
+// and it can be used with some compilers that define __GNUC__.
+# if (defined(__GNUC__) && !defined(__CUDACC__) && (GTEST_GCC_VER_ >= 40000) \
+      && !GTEST_OS_QNX && !defined(_LIBCPP_VERSION)) || _MSC_VER >= 1600
+#  define GTEST_ENV_HAS_TR1_TUPLE_ 1
+# endif
+
+// C++11 specifies that <tuple> provides std::tuple. Use that if gtest is used
+// in C++11 mode and libstdc++ isn't very old (binaries targeting OS X 10.6
+// can build with clang but need to use gcc4.2's libstdc++).
+# if GTEST_LANG_CXX11 && (!defined(__GLIBCXX__) || __GLIBCXX__ > 20110325)
+#  define GTEST_ENV_HAS_STD_TUPLE_ 1
+# endif
+
+# if GTEST_ENV_HAS_TR1_TUPLE_ || GTEST_ENV_HAS_STD_TUPLE_
+#  define GTEST_USE_OWN_TR1_TUPLE 0
+# else
+#  define GTEST_USE_OWN_TR1_TUPLE 1
+# endif
+
+#endif  // GTEST_USE_OWN_TR1_TUPLE
+
+// To avoid conditional compilation everywhere, we make it
+// gtest-port.h's responsibility to #include the header implementing
+// tuple.
+#if GTEST_HAS_STD_TUPLE_
+# include <tuple>  // IWYU pragma: export
+# define GTEST_TUPLE_NAMESPACE_ ::std
+#endif  // GTEST_HAS_STD_TUPLE_
+
+// We include tr1::tuple even if std::tuple is available to define printers for
+// them.
+#if GTEST_HAS_TR1_TUPLE
+# ifndef GTEST_TUPLE_NAMESPACE_
+#  define GTEST_TUPLE_NAMESPACE_ ::std::tr1
+# endif  // GTEST_TUPLE_NAMESPACE_
+
+# if GTEST_USE_OWN_TR1_TUPLE
+// This file was GENERATED by command:
+//     pump.py gtest-tuple.h.pump
+// DO NOT EDIT BY HAND!!!
+
+// Copyright 2009 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Implements a subset of TR1 tuple needed by Google Test and Google Mock.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_
+
+#include <utility>  // For ::std::pair.
+
+// The compiler used in Symbian has a bug that prevents us from declaring the
+// tuple template as a friend (it complains that tuple is redefined).  This
+// hack bypasses the bug by declaring the members that should otherwise be
+// private as public.
+// Sun Studio versions < 12 also have the above bug.
+#if defined(__SYMBIAN32__) || (defined(__SUNPRO_CC) && __SUNPRO_CC < 0x590)
+# define GTEST_DECLARE_TUPLE_AS_FRIEND_ public:
+#else
+# define GTEST_DECLARE_TUPLE_AS_FRIEND_ \
+    template <GTEST_10_TYPENAMES_(U)> friend class tuple; \
+   private:
+#endif
+
+// Visual Studio 2010, 2012, and 2013 define symbols in std::tr1 that conflict
+// with our own definitions. Therefore using our own tuple does not work on
+// those compilers.
+#if defined(_MSC_VER) && _MSC_VER >= 1600  /* 1600 is Visual Studio 2010 */
+# error "gtest's tuple doesn't compile on Visual Studio 2010 or later. \
+GTEST_USE_OWN_TR1_TUPLE must be set to 0 on those compilers."
+#endif
+
+// GTEST_n_TUPLE_(T) is the type of an n-tuple.
+#define GTEST_0_TUPLE_(T) tuple<>
+#define GTEST_1_TUPLE_(T) tuple<T##0, void, void, void, void, void, void, \
+    void, void, void>
+#define GTEST_2_TUPLE_(T) tuple<T##0, T##1, void, void, void, void, void, \
+    void, void, void>
+#define GTEST_3_TUPLE_(T) tuple<T##0, T##1, T##2, void, void, void, void, \
+    void, void, void>
+#define GTEST_4_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, void, void, void, \
+    void, void, void>
+#define GTEST_5_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, void, void, \
+    void, void, void>
+#define GTEST_6_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, void, \
+    void, void, void>
+#define GTEST_7_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, T##6, \
+    void, void, void>
+#define GTEST_8_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, T##6, \
+    T##7, void, void>
+#define GTEST_9_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, T##6, \
+    T##7, T##8, void>
+#define GTEST_10_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, T##6, \
+    T##7, T##8, T##9>
+
+// GTEST_n_TYPENAMES_(T) declares a list of n typenames.
+#define GTEST_0_TYPENAMES_(T)
+#define GTEST_1_TYPENAMES_(T) typename T##0
+#define GTEST_2_TYPENAMES_(T) typename T##0, typename T##1
+#define GTEST_3_TYPENAMES_(T) typename T##0, typename T##1, typename T##2
+#define GTEST_4_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+    typename T##3
+#define GTEST_5_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+    typename T##3, typename T##4
+#define GTEST_6_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+    typename T##3, typename T##4, typename T##5
+#define GTEST_7_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+    typename T##3, typename T##4, typename T##5, typename T##6
+#define GTEST_8_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+    typename T##3, typename T##4, typename T##5, typename T##6, typename T##7
+#define GTEST_9_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+    typename T##3, typename T##4, typename T##5, typename T##6, \
+    typename T##7, typename T##8
+#define GTEST_10_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+    typename T##3, typename T##4, typename T##5, typename T##6, \
+    typename T##7, typename T##8, typename T##9
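+
+// As an illustrative expansion (not part of the generated code):
+//   GTEST_2_TYPENAMES_(T)  becomes  typename T0, typename T1
+//   GTEST_2_TUPLE_(T)      becomes  tuple<T0, T1, void, void, void, void,
+//                                         void, void, void, void>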
+
+// In theory, defining stuff in the ::std namespace is undefined
+// behavior.  We can do this as we are playing the role of a standard
+// library vendor.
+namespace std {
+namespace tr1 {
+
+template <typename T0 = void, typename T1 = void, typename T2 = void,
+    typename T3 = void, typename T4 = void, typename T5 = void,
+    typename T6 = void, typename T7 = void, typename T8 = void,
+    typename T9 = void>
+class tuple;
+
+// Anything in namespace gtest_internal is Google Test's INTERNAL
+// IMPLEMENTATION DETAIL and MUST NOT BE USED DIRECTLY in user code.
+namespace gtest_internal {
+
+// ByRef<T>::type is T if T is a reference; otherwise it's const T&.
+template <typename T>
+struct ByRef { typedef const T& type; };  // NOLINT
+template <typename T>
+struct ByRef<T&> { typedef T& type; };  // NOLINT
+
+// A handy wrapper for ByRef.
+#define GTEST_BY_REF_(T) typename ::std::tr1::gtest_internal::ByRef<T>::type
+
+// AddRef<T>::type is T if T is a reference; otherwise it's T&.  This
+// is the same as tr1::add_reference<T>::type.
+template <typename T>
+struct AddRef { typedef T& type; };  // NOLINT
+template <typename T>
+struct AddRef<T&> { typedef T& type; };  // NOLINT
+
+// A handy wrapper for AddRef.
+#define GTEST_ADD_REF_(T) typename ::std::tr1::gtest_internal::AddRef<T>::type
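+
+// For instance (illustrative): ByRef<int>::type is const int&, while
+// ByRef<int&>::type and AddRef<int>::type are both int&.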
+
+// A helper for implementing get<k>().
+template <int k> class Get;
+
+// A helper for implementing tuple_element<k, T>.  kIndexValid is true
+// iff k < the number of fields in tuple type T.
+template <bool kIndexValid, int kIndex, class Tuple>
+struct TupleElement;
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 0, GTEST_10_TUPLE_(T) > {
+  typedef T0 type;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 1, GTEST_10_TUPLE_(T) > {
+  typedef T1 type;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 2, GTEST_10_TUPLE_(T) > {
+  typedef T2 type;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 3, GTEST_10_TUPLE_(T) > {
+  typedef T3 type;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 4, GTEST_10_TUPLE_(T) > {
+  typedef T4 type;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 5, GTEST_10_TUPLE_(T) > {
+  typedef T5 type;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 6, GTEST_10_TUPLE_(T) > {
+  typedef T6 type;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 7, GTEST_10_TUPLE_(T) > {
+  typedef T7 type;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 8, GTEST_10_TUPLE_(T) > {
+  typedef T8 type;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 9, GTEST_10_TUPLE_(T) > {
+  typedef T9 type;
+};
+
+}  // namespace gtest_internal
+
+template <>
+class tuple<> {
+ public:
+  tuple() {}
+  tuple(const tuple& /* t */)  {}
+  tuple& operator=(const tuple& /* t */) { return *this; }
+};
+
+template <GTEST_1_TYPENAMES_(T)>
+class GTEST_1_TUPLE_(T) {
+ public:
+  template <int k> friend class gtest_internal::Get;
+
+  tuple() : f0_() {}
+
+  explicit tuple(GTEST_BY_REF_(T0) f0) : f0_(f0) {}
+
+  tuple(const tuple& t) : f0_(t.f0_) {}
+
+  template <GTEST_1_TYPENAMES_(U)>
+  tuple(const GTEST_1_TUPLE_(U)& t) : f0_(t.f0_) {}
+
+  tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+  template <GTEST_1_TYPENAMES_(U)>
+  tuple& operator=(const GTEST_1_TUPLE_(U)& t) {
+    return CopyFrom(t);
+  }
+
+  GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+  template <GTEST_1_TYPENAMES_(U)>
+  tuple& CopyFrom(const GTEST_1_TUPLE_(U)& t) {
+    f0_ = t.f0_;
+    return *this;
+  }
+
+  T0 f0_;
+};
+
+template <GTEST_2_TYPENAMES_(T)>
+class GTEST_2_TUPLE_(T) {
+ public:
+  template <int k> friend class gtest_internal::Get;
+
+  tuple() : f0_(), f1_() {}
+
+  explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1) : f0_(f0),
+      f1_(f1) {}
+
+  tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_) {}
+
+  template <GTEST_2_TYPENAMES_(U)>
+  tuple(const GTEST_2_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_) {}
+  template <typename U0, typename U1>
+  tuple(const ::std::pair<U0, U1>& p) : f0_(p.first), f1_(p.second) {}
+
+  tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+  template <GTEST_2_TYPENAMES_(U)>
+  tuple& operator=(const GTEST_2_TUPLE_(U)& t) {
+    return CopyFrom(t);
+  }
+  template <typename U0, typename U1>
+  tuple& operator=(const ::std::pair<U0, U1>& p) {
+    f0_ = p.first;
+    f1_ = p.second;
+    return *this;
+  }
+
+  GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+  template <GTEST_2_TYPENAMES_(U)>
+  tuple& CopyFrom(const GTEST_2_TUPLE_(U)& t) {
+    f0_ = t.f0_;
+    f1_ = t.f1_;
+    return *this;
+  }
+
+  T0 f0_;
+  T1 f1_;
+};
+
+template <GTEST_3_TYPENAMES_(T)>
+class GTEST_3_TUPLE_(T) {
+ public:
+  template <int k> friend class gtest_internal::Get;
+
+  tuple() : f0_(), f1_(), f2_() {}
+
+  explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+      GTEST_BY_REF_(T2) f2) : f0_(f0), f1_(f1), f2_(f2) {}
+
+  tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_) {}
+
+  template <GTEST_3_TYPENAMES_(U)>
+  tuple(const GTEST_3_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_) {}
+
+  tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+  template <GTEST_3_TYPENAMES_(U)>
+  tuple& operator=(const GTEST_3_TUPLE_(U)& t) {
+    return CopyFrom(t);
+  }
+
+  GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+  template <GTEST_3_TYPENAMES_(U)>
+  tuple& CopyFrom(const GTEST_3_TUPLE_(U)& t) {
+    f0_ = t.f0_;
+    f1_ = t.f1_;
+    f2_ = t.f2_;
+    return *this;
+  }
+
+  T0 f0_;
+  T1 f1_;
+  T2 f2_;
+};
+
+template <GTEST_4_TYPENAMES_(T)>
+class GTEST_4_TUPLE_(T) {
+ public:
+  template <int k> friend class gtest_internal::Get;
+
+  tuple() : f0_(), f1_(), f2_(), f3_() {}
+
+  explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+      GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3) : f0_(f0), f1_(f1), f2_(f2),
+      f3_(f3) {}
+
+  tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_) {}
+
+  template <GTEST_4_TYPENAMES_(U)>
+  tuple(const GTEST_4_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+      f3_(t.f3_) {}
+
+  tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+  template <GTEST_4_TYPENAMES_(U)>
+  tuple& operator=(const GTEST_4_TUPLE_(U)& t) {
+    return CopyFrom(t);
+  }
+
+  GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+  template <GTEST_4_TYPENAMES_(U)>
+  tuple& CopyFrom(const GTEST_4_TUPLE_(U)& t) {
+    f0_ = t.f0_;
+    f1_ = t.f1_;
+    f2_ = t.f2_;
+    f3_ = t.f3_;
+    return *this;
+  }
+
+  T0 f0_;
+  T1 f1_;
+  T2 f2_;
+  T3 f3_;
+};
+
+template <GTEST_5_TYPENAMES_(T)>
+class GTEST_5_TUPLE_(T) {
+ public:
+  template <int k> friend class gtest_internal::Get;
+
+  tuple() : f0_(), f1_(), f2_(), f3_(), f4_() {}
+
+  explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+      GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3,
+      GTEST_BY_REF_(T4) f4) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4) {}
+
+  tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+      f4_(t.f4_) {}
+
+  template <GTEST_5_TYPENAMES_(U)>
+  tuple(const GTEST_5_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+      f3_(t.f3_), f4_(t.f4_) {}
+
+  tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+  template <GTEST_5_TYPENAMES_(U)>
+  tuple& operator=(const GTEST_5_TUPLE_(U)& t) {
+    return CopyFrom(t);
+  }
+
+  GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+  template <GTEST_5_TYPENAMES_(U)>
+  tuple& CopyFrom(const GTEST_5_TUPLE_(U)& t) {
+    f0_ = t.f0_;
+    f1_ = t.f1_;
+    f2_ = t.f2_;
+    f3_ = t.f3_;
+    f4_ = t.f4_;
+    return *this;
+  }
+
+  T0 f0_;
+  T1 f1_;
+  T2 f2_;
+  T3 f3_;
+  T4 f4_;
+};
+
+template <GTEST_6_TYPENAMES_(T)>
+class GTEST_6_TUPLE_(T) {
+ public:
+  template <int k> friend class gtest_internal::Get;
+
+  tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_() {}
+
+  explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+      GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
+      GTEST_BY_REF_(T5) f5) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4),
+      f5_(f5) {}
+
+  tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+      f4_(t.f4_), f5_(t.f5_) {}
+
+  template <GTEST_6_TYPENAMES_(U)>
+  tuple(const GTEST_6_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+      f3_(t.f3_), f4_(t.f4_), f5_(t.f5_) {}
+
+  tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+  template <GTEST_6_TYPENAMES_(U)>
+  tuple& operator=(const GTEST_6_TUPLE_(U)& t) {
+    return CopyFrom(t);
+  }
+
+  GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+  template <GTEST_6_TYPENAMES_(U)>
+  tuple& CopyFrom(const GTEST_6_TUPLE_(U)& t) {
+    f0_ = t.f0_;
+    f1_ = t.f1_;
+    f2_ = t.f2_;
+    f3_ = t.f3_;
+    f4_ = t.f4_;
+    f5_ = t.f5_;
+    return *this;
+  }
+
+  T0 f0_;
+  T1 f1_;
+  T2 f2_;
+  T3 f3_;
+  T4 f4_;
+  T5 f5_;
+};
+
+template <GTEST_7_TYPENAMES_(T)>
+class GTEST_7_TUPLE_(T) {
+ public:
+  template <int k> friend class gtest_internal::Get;
+
+  tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_() {}
+
+  explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+      GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
+      GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6) : f0_(f0), f1_(f1), f2_(f2),
+      f3_(f3), f4_(f4), f5_(f5), f6_(f6) {}
+
+  tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+      f4_(t.f4_), f5_(t.f5_), f6_(t.f6_) {}
+
+  template <GTEST_7_TYPENAMES_(U)>
+  tuple(const GTEST_7_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+      f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_) {}
+
+  tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+  template <GTEST_7_TYPENAMES_(U)>
+  tuple& operator=(const GTEST_7_TUPLE_(U)& t) {
+    return CopyFrom(t);
+  }
+
+  GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+  template <GTEST_7_TYPENAMES_(U)>
+  tuple& CopyFrom(const GTEST_7_TUPLE_(U)& t) {
+    f0_ = t.f0_;
+    f1_ = t.f1_;
+    f2_ = t.f2_;
+    f3_ = t.f3_;
+    f4_ = t.f4_;
+    f5_ = t.f5_;
+    f6_ = t.f6_;
+    return *this;
+  }
+
+  T0 f0_;
+  T1 f1_;
+  T2 f2_;
+  T3 f3_;
+  T4 f4_;
+  T5 f5_;
+  T6 f6_;
+};
+
+template <GTEST_8_TYPENAMES_(T)>
+class GTEST_8_TUPLE_(T) {
+ public:
+  template <int k> friend class gtest_internal::Get;
+
+  tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_() {}
+
+  explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+      GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
+      GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6,
+      GTEST_BY_REF_(T7) f7) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4),
+      f5_(f5), f6_(f6), f7_(f7) {}
+
+  tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+      f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_) {}
+
+  template <GTEST_8_TYPENAMES_(U)>
+  tuple(const GTEST_8_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+      f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_) {}
+
+  tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+  template <GTEST_8_TYPENAMES_(U)>
+  tuple& operator=(const GTEST_8_TUPLE_(U)& t) {
+    return CopyFrom(t);
+  }
+
+  GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+  template <GTEST_8_TYPENAMES_(U)>
+  tuple& CopyFrom(const GTEST_8_TUPLE_(U)& t) {
+    f0_ = t.f0_;
+    f1_ = t.f1_;
+    f2_ = t.f2_;
+    f3_ = t.f3_;
+    f4_ = t.f4_;
+    f5_ = t.f5_;
+    f6_ = t.f6_;
+    f7_ = t.f7_;
+    return *this;
+  }
+
+  T0 f0_;
+  T1 f1_;
+  T2 f2_;
+  T3 f3_;
+  T4 f4_;
+  T5 f5_;
+  T6 f6_;
+  T7 f7_;
+};
+
+template <GTEST_9_TYPENAMES_(T)>
+class GTEST_9_TUPLE_(T) {
+ public:
+  template <int k> friend class gtest_internal::Get;
+
+  tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_(), f8_() {}
+
+  explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+      GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
+      GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6, GTEST_BY_REF_(T7) f7,
+      GTEST_BY_REF_(T8) f8) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4),
+      f5_(f5), f6_(f6), f7_(f7), f8_(f8) {}
+
+  tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+      f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_) {}
+
+  template <GTEST_9_TYPENAMES_(U)>
+  tuple(const GTEST_9_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+      f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_) {}
+
+  tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+  template <GTEST_9_TYPENAMES_(U)>
+  tuple& operator=(const GTEST_9_TUPLE_(U)& t) {
+    return CopyFrom(t);
+  }
+
+  GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+  template <GTEST_9_TYPENAMES_(U)>
+  tuple& CopyFrom(const GTEST_9_TUPLE_(U)& t) {
+    f0_ = t.f0_;
+    f1_ = t.f1_;
+    f2_ = t.f2_;
+    f3_ = t.f3_;
+    f4_ = t.f4_;
+    f5_ = t.f5_;
+    f6_ = t.f6_;
+    f7_ = t.f7_;
+    f8_ = t.f8_;
+    return *this;
+  }
+
+  T0 f0_;
+  T1 f1_;
+  T2 f2_;
+  T3 f3_;
+  T4 f4_;
+  T5 f5_;
+  T6 f6_;
+  T7 f7_;
+  T8 f8_;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+class tuple {
+ public:
+  template <int k> friend class gtest_internal::Get;
+
+  tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_(), f8_(),
+      f9_() {}
+
+  explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+      GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
+      GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6, GTEST_BY_REF_(T7) f7,
+      GTEST_BY_REF_(T8) f8, GTEST_BY_REF_(T9) f9) : f0_(f0), f1_(f1), f2_(f2),
+      f3_(f3), f4_(f4), f5_(f5), f6_(f6), f7_(f7), f8_(f8), f9_(f9) {}
+
+  tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+      f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_), f9_(t.f9_) {}
+
+  template <GTEST_10_TYPENAMES_(U)>
+  tuple(const GTEST_10_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+      f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_),
+      f9_(t.f9_) {}
+
+  tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+  template <GTEST_10_TYPENAMES_(U)>
+  tuple& operator=(const GTEST_10_TUPLE_(U)& t) {
+    return CopyFrom(t);
+  }
+
+  GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+  template <GTEST_10_TYPENAMES_(U)>
+  tuple& CopyFrom(const GTEST_10_TUPLE_(U)& t) {
+    f0_ = t.f0_;
+    f1_ = t.f1_;
+    f2_ = t.f2_;
+    f3_ = t.f3_;
+    f4_ = t.f4_;
+    f5_ = t.f5_;
+    f6_ = t.f6_;
+    f7_ = t.f7_;
+    f8_ = t.f8_;
+    f9_ = t.f9_;
+    return *this;
+  }
+
+  T0 f0_;
+  T1 f1_;
+  T2 f2_;
+  T3 f3_;
+  T4 f4_;
+  T5 f5_;
+  T6 f6_;
+  T7 f7_;
+  T8 f8_;
+  T9 f9_;
+};
+
+// 6.1.3.2 Tuple creation functions.
+
+// Known limitations: we don't support passing an
+// std::tr1::reference_wrapper<T> to make_tuple().  And we don't
+// implement tie().
+
+inline tuple<> make_tuple() { return tuple<>(); }
+
+template <GTEST_1_TYPENAMES_(T)>
+inline GTEST_1_TUPLE_(T) make_tuple(const T0& f0) {
+  return GTEST_1_TUPLE_(T)(f0);
+}
+
+template <GTEST_2_TYPENAMES_(T)>
+inline GTEST_2_TUPLE_(T) make_tuple(const T0& f0, const T1& f1) {
+  return GTEST_2_TUPLE_(T)(f0, f1);
+}
+
+template <GTEST_3_TYPENAMES_(T)>
+inline GTEST_3_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2) {
+  return GTEST_3_TUPLE_(T)(f0, f1, f2);
+}
+
+template <GTEST_4_TYPENAMES_(T)>
+inline GTEST_4_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+    const T3& f3) {
+  return GTEST_4_TUPLE_(T)(f0, f1, f2, f3);
+}
+
+template <GTEST_5_TYPENAMES_(T)>
+inline GTEST_5_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+    const T3& f3, const T4& f4) {
+  return GTEST_5_TUPLE_(T)(f0, f1, f2, f3, f4);
+}
+
+template <GTEST_6_TYPENAMES_(T)>
+inline GTEST_6_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+    const T3& f3, const T4& f4, const T5& f5) {
+  return GTEST_6_TUPLE_(T)(f0, f1, f2, f3, f4, f5);
+}
+
+template <GTEST_7_TYPENAMES_(T)>
+inline GTEST_7_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+    const T3& f3, const T4& f4, const T5& f5, const T6& f6) {
+  return GTEST_7_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6);
+}
+
+template <GTEST_8_TYPENAMES_(T)>
+inline GTEST_8_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+    const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7) {
+  return GTEST_8_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7);
+}
+
+template <GTEST_9_TYPENAMES_(T)>
+inline GTEST_9_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+    const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7,
+    const T8& f8) {
+  return GTEST_9_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7, f8);
+}
+
+template <GTEST_10_TYPENAMES_(T)>
+inline GTEST_10_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+    const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7,
+    const T8& f8, const T9& f9) {
+  return GTEST_10_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9);
+}
+
+// 6.1.3.3 Tuple helper classes.
+
+template <typename Tuple> struct tuple_size;
+
+template <GTEST_0_TYPENAMES_(T)>
+struct tuple_size<GTEST_0_TUPLE_(T) > {
+  static const int value = 0;
+};
+
+template <GTEST_1_TYPENAMES_(T)>
+struct tuple_size<GTEST_1_TUPLE_(T) > {
+  static const int value = 1;
+};
+
+template <GTEST_2_TYPENAMES_(T)>
+struct tuple_size<GTEST_2_TUPLE_(T) > {
+  static const int value = 2;
+};
+
+template <GTEST_3_TYPENAMES_(T)>
+struct tuple_size<GTEST_3_TUPLE_(T) > {
+  static const int value = 3;
+};
+
+template <GTEST_4_TYPENAMES_(T)>
+struct tuple_size<GTEST_4_TUPLE_(T) > {
+  static const int value = 4;
+};
+
+template <GTEST_5_TYPENAMES_(T)>
+struct tuple_size<GTEST_5_TUPLE_(T) > {
+  static const int value = 5;
+};
+
+template <GTEST_6_TYPENAMES_(T)>
+struct tuple_size<GTEST_6_TUPLE_(T) > {
+  static const int value = 6;
+};
+
+template <GTEST_7_TYPENAMES_(T)>
+struct tuple_size<GTEST_7_TUPLE_(T) > {
+  static const int value = 7;
+};
+
+template <GTEST_8_TYPENAMES_(T)>
+struct tuple_size<GTEST_8_TUPLE_(T) > {
+  static const int value = 8;
+};
+
+template <GTEST_9_TYPENAMES_(T)>
+struct tuple_size<GTEST_9_TUPLE_(T) > {
+  static const int value = 9;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct tuple_size<GTEST_10_TUPLE_(T) > {
+  static const int value = 10;
+};
+
+template <int k, class Tuple>
+struct tuple_element {
+  typedef typename gtest_internal::TupleElement<
+      k < (tuple_size<Tuple>::value), k, Tuple>::type type;
+};
+
+#define GTEST_TUPLE_ELEMENT_(k, Tuple) typename tuple_element<k, Tuple >::type
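+
+// For example (illustrative): tuple_element<1, tuple<int, char> >::type is
+// char, so GTEST_TUPLE_ELEMENT_(1, tuple<int, char>) names the same type.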
+
+// 6.1.3.4 Element access.
+
+namespace gtest_internal {
+
+template <>
+class Get<0> {
+ public:
+  template <class Tuple>
+  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(0, Tuple))
+  Field(Tuple& t) { return t.f0_; }  // NOLINT
+
+  template <class Tuple>
+  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(0, Tuple))
+  ConstField(const Tuple& t) { return t.f0_; }
+};
+
+template <>
+class Get<1> {
+ public:
+  template <class Tuple>
+  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(1, Tuple))
+  Field(Tuple& t) { return t.f1_; }  // NOLINT
+
+  template <class Tuple>
+  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(1, Tuple))
+  ConstField(const Tuple& t) { return t.f1_; }
+};
+
+template <>
+class Get<2> {
+ public:
+  template <class Tuple>
+  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(2, Tuple))
+  Field(Tuple& t) { return t.f2_; }  // NOLINT
+
+  template <class Tuple>
+  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(2, Tuple))
+  ConstField(const Tuple& t) { return t.f2_; }
+};
+
+template <>
+class Get<3> {
+ public:
+  template <class Tuple>
+  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(3, Tuple))
+  Field(Tuple& t) { return t.f3_; }  // NOLINT
+
+  template <class Tuple>
+  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(3, Tuple))
+  ConstField(const Tuple& t) { return t.f3_; }
+};
+
+template <>
+class Get<4> {
+ public:
+  template <class Tuple>
+  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(4, Tuple))
+  Field(Tuple& t) { return t.f4_; }  // NOLINT
+
+  template <class Tuple>
+  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(4, Tuple))
+  ConstField(const Tuple& t) { return t.f4_; }
+};
+
+template <>
+class Get<5> {
+ public:
+  template <class Tuple>
+  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(5, Tuple))
+  Field(Tuple& t) { return t.f5_; }  // NOLINT
+
+  template <class Tuple>
+  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(5, Tuple))
+  ConstField(const Tuple& t) { return t.f5_; }
+};
+
+template <>
+class Get<6> {
+ public:
+  template <class Tuple>
+  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(6, Tuple))
+  Field(Tuple& t) { return t.f6_; }  // NOLINT
+
+  template <class Tuple>
+  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(6, Tuple))
+  ConstField(const Tuple& t) { return t.f6_; }
+};
+
+template <>
+class Get<7> {
+ public:
+  template <class Tuple>
+  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(7, Tuple))
+  Field(Tuple& t) { return t.f7_; }  // NOLINT
+
+  template <class Tuple>
+  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(7, Tuple))
+  ConstField(const Tuple& t) { return t.f7_; }
+};
+
+template <>
+class Get<8> {
+ public:
+  template <class Tuple>
+  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(8, Tuple))
+  Field(Tuple& t) { return t.f8_; }  // NOLINT
+
+  template <class Tuple>
+  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(8, Tuple))
+  ConstField(const Tuple& t) { return t.f8_; }
+};
+
+template <>
+class Get<9> {
+ public:
+  template <class Tuple>
+  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(9, Tuple))
+  Field(Tuple& t) { return t.f9_; }  // NOLINT
+
+  template <class Tuple>
+  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(9, Tuple))
+  ConstField(const Tuple& t) { return t.f9_; }
+};
+
+}  // namespace gtest_internal
+
+template <int k, GTEST_10_TYPENAMES_(T)>
+GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(k, GTEST_10_TUPLE_(T)))
+get(GTEST_10_TUPLE_(T)& t) {
+  return gtest_internal::Get<k>::Field(t);
+}
+
+template <int k, GTEST_10_TYPENAMES_(T)>
+GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(k,  GTEST_10_TUPLE_(T)))
+get(const GTEST_10_TUPLE_(T)& t) {
+  return gtest_internal::Get<k>::ConstField(t);
+}
+
+// 6.1.3.5 Relational operators
+
+// We only implement == and !=, as we don't have a need for the rest yet.
+
+namespace gtest_internal {
+
+// SameSizeTuplePrefixComparator<k, k>::Eq(t1, t2) returns true if the
+// first k fields of t1 equal the first k fields of t2.
+// SameSizeTuplePrefixComparator(k1, k2) would be a compiler error if
+// k1 != k2.
+template <int kSize1, int kSize2>
+struct SameSizeTuplePrefixComparator;
+
+template <>
+struct SameSizeTuplePrefixComparator<0, 0> {
+  template <class Tuple1, class Tuple2>
+  static bool Eq(const Tuple1& /* t1 */, const Tuple2& /* t2 */) {
+    return true;
+  }
+};
+
+template <int k>
+struct SameSizeTuplePrefixComparator<k, k> {
+  template <class Tuple1, class Tuple2>
+  static bool Eq(const Tuple1& t1, const Tuple2& t2) {
+    return SameSizeTuplePrefixComparator<k - 1, k - 1>::Eq(t1, t2) &&
+        ::std::tr1::get<k - 1>(t1) == ::std::tr1::get<k - 1>(t2);
+  }
+};
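+
+// For instance (illustrative): SameSizeTuplePrefixComparator<2, 2>::Eq(t1, t2)
+// expands to
+//   SameSizeTuplePrefixComparator<1, 1>::Eq(t1, t2) &&
+//       get<1>(t1) == get<1>(t2)
+// which in turn reduces to
+//   true && get<0>(t1) == get<0>(t2) && get<1>(t1) == get<1>(t2).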
+
+}  // namespace gtest_internal
+
+template <GTEST_10_TYPENAMES_(T), GTEST_10_TYPENAMES_(U)>
+inline bool operator==(const GTEST_10_TUPLE_(T)& t,
+                       const GTEST_10_TUPLE_(U)& u) {
+  return gtest_internal::SameSizeTuplePrefixComparator<
+      tuple_size<GTEST_10_TUPLE_(T) >::value,
+      tuple_size<GTEST_10_TUPLE_(U) >::value>::Eq(t, u);
+}
+
+template <GTEST_10_TYPENAMES_(T), GTEST_10_TYPENAMES_(U)>
+inline bool operator!=(const GTEST_10_TUPLE_(T)& t,
+                       const GTEST_10_TUPLE_(U)& u) { return !(t == u); }
+
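+// A hedged usage sketch of the tuple pieces above (illustrative only; the
+// names used are the ones defined in this header):
+//
+//   std::tr1::tuple<int, bool> t = std::tr1::make_tuple(3, true);
+//   int first = std::tr1::get<0>(t);                         // first == 3
+//   const int n = std::tr1::tuple_size<
+//       std::tr1::tuple<int, bool> >::value;                 // n == 2
+//   bool same = (t == std::tr1::make_tuple(3, true));        // same == true
+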
+// 6.1.4 Pairs.
+// Unimplemented.
+
+}  // namespace tr1
+}  // namespace std
+
+#undef GTEST_0_TUPLE_
+#undef GTEST_1_TUPLE_
+#undef GTEST_2_TUPLE_
+#undef GTEST_3_TUPLE_
+#undef GTEST_4_TUPLE_
+#undef GTEST_5_TUPLE_
+#undef GTEST_6_TUPLE_
+#undef GTEST_7_TUPLE_
+#undef GTEST_8_TUPLE_
+#undef GTEST_9_TUPLE_
+#undef GTEST_10_TUPLE_
+
+#undef GTEST_0_TYPENAMES_
+#undef GTEST_1_TYPENAMES_
+#undef GTEST_2_TYPENAMES_
+#undef GTEST_3_TYPENAMES_
+#undef GTEST_4_TYPENAMES_
+#undef GTEST_5_TYPENAMES_
+#undef GTEST_6_TYPENAMES_
+#undef GTEST_7_TYPENAMES_
+#undef GTEST_8_TYPENAMES_
+#undef GTEST_9_TYPENAMES_
+#undef GTEST_10_TYPENAMES_
+
+#undef GTEST_DECLARE_TUPLE_AS_FRIEND_
+#undef GTEST_BY_REF_
+#undef GTEST_ADD_REF_
+#undef GTEST_TUPLE_ELEMENT_
+
+#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_
+# elif GTEST_ENV_HAS_STD_TUPLE_
+#  include <tuple>
+// C++11 puts its tuple into the ::std namespace rather than
+// ::std::tr1.  gtest expects tuple to live in ::std::tr1, so put it there.
+// This causes undefined behavior, but supported compilers react in
+// the way we intend.
+namespace std {
+namespace tr1 {
+using ::std::get;
+using ::std::make_tuple;
+using ::std::tuple;
+using ::std::tuple_element;
+using ::std::tuple_size;
+}
+}
+
+# elif GTEST_OS_SYMBIAN
+
+// On Symbian, BOOST_HAS_TR1_TUPLE causes Boost's TR1 tuple library to
+// use STLport's tuple implementation, which unfortunately doesn't
+// work as the copy of STLport distributed with Symbian is incomplete.
+// By making sure BOOST_HAS_TR1_TUPLE is undefined, we force Boost to
+// use its own tuple implementation.
+#  ifdef BOOST_HAS_TR1_TUPLE
+#   undef BOOST_HAS_TR1_TUPLE
+#  endif  // BOOST_HAS_TR1_TUPLE
+
+// This prevents <boost/tr1/detail/config.hpp>, which defines
+// BOOST_HAS_TR1_TUPLE, from being #included by Boost's <tuple>.
+#  define BOOST_TR1_DETAIL_CONFIG_HPP_INCLUDED
+#  include <tuple>  // IWYU pragma: export  // NOLINT
+
+# elif defined(__GNUC__) && (GTEST_GCC_VER_ >= 40000)
+// GCC 4.0+ implements tr1/tuple in the <tr1/tuple> header.  This does
+// not conform to the TR1 spec, which requires the header to be <tuple>.
+
+#  if !GTEST_HAS_RTTI && GTEST_GCC_VER_ < 40302
+// Until version 4.3.2, gcc has a bug that causes <tr1/functional>,
+// which is #included by <tr1/tuple>, to not compile when RTTI is
+// disabled.  _TR1_FUNCTIONAL is the header guard for
+// <tr1/functional>.  Hence the following #define is a hack to prevent
+// <tr1/functional> from being included.
+#   define _TR1_FUNCTIONAL 1
+#   include <tr1/tuple>
+#   undef _TR1_FUNCTIONAL  // Allows the user to #include
+                           // <tr1/functional> if they choose to.
+#  else
+#   include <tr1/tuple>  // NOLINT
+#  endif  // !GTEST_HAS_RTTI && GTEST_GCC_VER_ < 40302
+
+# else
+// If the compiler is not GCC 4.0+, we assume the user is using a
+// spec-conforming TR1 implementation.
+#  include <tuple>  // IWYU pragma: export  // NOLINT
+# endif  // GTEST_USE_OWN_TR1_TUPLE
+
+#endif  // GTEST_HAS_TR1_TUPLE
+
+// Determines whether clone(2) is supported.
+// Usually it will only be available on Linux, excluding
+// Linux on the Itanium architecture.
+// Also see http://linux.die.net/man/2/clone.
+#ifndef GTEST_HAS_CLONE
+// The user didn't tell us, so we need to figure it out.
+
+# if GTEST_OS_LINUX && !defined(__ia64__)
+#  if GTEST_OS_LINUX_ANDROID
+// On Android, clone() became available at different API levels for each 32-bit
+// architecture.
+#    if defined(__LP64__) || \
+        (defined(__arm__) && __ANDROID_API__ >= 9) || \
+        (defined(__mips__) && __ANDROID_API__ >= 12) || \
+        (defined(__i386__) && __ANDROID_API__ >= 17)
+#     define GTEST_HAS_CLONE 1
+#    else
+#     define GTEST_HAS_CLONE 0
+#    endif
+#  else
+#   define GTEST_HAS_CLONE 1
+#  endif
+# else
+#  define GTEST_HAS_CLONE 0
+# endif  // GTEST_OS_LINUX && !defined(__ia64__)
+
+#endif  // GTEST_HAS_CLONE
+
+// Determines whether to support stream redirection. This is used to test
+// output correctness and to implement death tests.
+#ifndef GTEST_HAS_STREAM_REDIRECTION
+// By default, we assume that stream redirection is supported on all
+// platforms except known mobile ones.
+# if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN || \
+    GTEST_OS_WINDOWS_PHONE || GTEST_OS_WINDOWS_RT
+#  define GTEST_HAS_STREAM_REDIRECTION 0
+# else
+#  define GTEST_HAS_STREAM_REDIRECTION 1
+# endif  // !GTEST_OS_WINDOWS_MOBILE && !GTEST_OS_SYMBIAN
+#endif  // GTEST_HAS_STREAM_REDIRECTION
+
+// Determines whether to support death tests.
+// Google Test does not support death tests for VC 7.1 and earlier because
+// abort() in a VC 7.1 application compiled as a GUI program in the debug
+// configuration pops up a dialog window that cannot be suppressed
+// programmatically.
+#if (GTEST_OS_LINUX || GTEST_OS_CYGWIN || GTEST_OS_SOLARIS || \
+     (GTEST_OS_MAC && !GTEST_OS_IOS) || \
+     (GTEST_OS_WINDOWS_DESKTOP && _MSC_VER >= 1400) || \
+     GTEST_OS_WINDOWS_MINGW || GTEST_OS_AIX || GTEST_OS_HPUX || \
+     GTEST_OS_OPENBSD || GTEST_OS_QNX || GTEST_OS_FREEBSD)
+# define GTEST_HAS_DEATH_TEST 1
+#endif
+
+// We no longer support MSVC 7.1 with exceptions disabled.  Therefore
+// all the compilers we care about are adequate for supporting
+// value-parameterized tests.
+#define GTEST_HAS_PARAM_TEST 1
+
+// Determines whether to support type-driven tests.
+
+// Typed tests need <typeinfo> and variadic macros, which GCC, VC++ 8.0,
+// Sun Pro CC, IBM Visual Age, and HP aCC support.
+#if defined(__GNUC__) || (_MSC_VER >= 1400) || defined(__SUNPRO_CC) || \
+    defined(__IBMCPP__) || defined(__HP_aCC)
+# define GTEST_HAS_TYPED_TEST 1
+# define GTEST_HAS_TYPED_TEST_P 1
+#endif
+
+// Determines whether to support Combine(). This only makes sense when
+// value-parameterized tests are enabled.  The implementation doesn't
+// work on Sun Studio since it doesn't understand templated conversion
+// operators.
+#if GTEST_HAS_PARAM_TEST && GTEST_HAS_TR1_TUPLE && !defined(__SUNPRO_CC)
+# define GTEST_HAS_COMBINE 1
+#endif
+
+// Determines whether the system compiler uses UTF-16 for encoding wide strings.
+#define GTEST_WIDE_STRING_USES_UTF16_ \
+    (GTEST_OS_WINDOWS || GTEST_OS_CYGWIN || GTEST_OS_SYMBIAN || GTEST_OS_AIX)
+
+// Determines whether test results can be streamed to a socket.
+#if GTEST_OS_LINUX
+# define GTEST_CAN_STREAM_RESULTS_ 1
+#endif
+
+// Defines some utility macros.
+
+// The GNU compiler emits a warning if nested "if" statements are followed by
+// an "else" statement and braces are not used to explicitly disambiguate the
+// "else" binding.  This leads to problems with code like:
+//
+//   if (gate)
+//     ASSERT_*(condition) << "Some message";
+//
+// The "switch (0) case 0:" idiom is used to suppress this.
+#ifdef __INTEL_COMPILER
+# define GTEST_AMBIGUOUS_ELSE_BLOCKER_
+#else
+# define GTEST_AMBIGUOUS_ELSE_BLOCKER_ switch (0) case 0: default:  // NOLINT
+#endif
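+
+// A minimal sketch of how the blocker is meant to be used inside an
+// assertion-style macro (illustrative; not the definition Google Test uses):
+//
+//   #define MY_ASSERT_(cond, on_failure) \
+//     GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+//     if (cond) \
+//       ; \
+//     else \
+//       on_failure
+//
+// When such a macro is expanded inside an unbraced "if (gate) ...;", the
+// leading "switch (0) case 0: default:" keeps GCC from warning that the
+// macro's "else" binds ambiguously.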
+
+// Use this annotation at the end of a struct/class definition to
+// prevent the compiler from optimizing away instances that are never
+// used.  This is useful when all interesting logic happens inside the
+// c'tor and / or d'tor.  Example:
+//
+//   struct Foo {
+//     Foo() { ... }
+//   } GTEST_ATTRIBUTE_UNUSED_;
+//
+// Also use it after a variable or parameter declaration to tell the
+// compiler the variable/parameter does not have to be used.
+#if defined(__GNUC__) && !defined(COMPILER_ICC)
+# define GTEST_ATTRIBUTE_UNUSED_ __attribute__ ((unused))
+#elif defined(__clang__)
+# if __has_attribute(unused)
+#  define GTEST_ATTRIBUTE_UNUSED_ __attribute__ ((unused))
+# endif
+#endif
+#ifndef GTEST_ATTRIBUTE_UNUSED_
+# define GTEST_ATTRIBUTE_UNUSED_
+#endif
+
+// A macro to disallow operator=
+// This should be used in the private: declarations for a class.
+#define GTEST_DISALLOW_ASSIGN_(type)\
+  void operator=(type const &)
+
+// A macro to disallow copy constructor and operator=
+// This should be used in the private: declarations for a class.
+#define GTEST_DISALLOW_COPY_AND_ASSIGN_(type)\
+  type(type const &);\
+  GTEST_DISALLOW_ASSIGN_(type)
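+
+// For example (illustrative sketch; "MyClass" is a placeholder name):
+//
+//   class MyClass {
+//    public:
+//     MyClass() {}
+//    private:
+//     GTEST_DISALLOW_COPY_AND_ASSIGN_(MyClass);
+//   };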
+
+// Tell the compiler to warn about unused return values for functions declared
+// with this macro.  The macro should be used on function declarations
+// following the argument list:
+//
+//   Sprocket* AllocateSprocket() GTEST_MUST_USE_RESULT_;
+#if defined(__GNUC__) && (GTEST_GCC_VER_ >= 30400) && !defined(COMPILER_ICC)
+# define GTEST_MUST_USE_RESULT_ __attribute__ ((warn_unused_result))
+#else
+# define GTEST_MUST_USE_RESULT_
+#endif  // __GNUC__ && (GTEST_GCC_VER_ >= 30400) && !COMPILER_ICC
+
+// MS C++ compiler emits warning when a conditional expression is compile time
+// constant. In some contexts this warning is false positive and needs to be
+// suppressed. Use the following two macros in such cases:
+//
+// GTEST_INTENTIONAL_CONST_COND_PUSH_()
+// while (true) {
+// GTEST_INTENTIONAL_CONST_COND_POP_()
+// }
+# define GTEST_INTENTIONAL_CONST_COND_PUSH_() \
+    GTEST_DISABLE_MSC_WARNINGS_PUSH_(4127)
+# define GTEST_INTENTIONAL_CONST_COND_POP_() \
+    GTEST_DISABLE_MSC_WARNINGS_POP_()
+
+// Determine whether the compiler supports Microsoft's Structured Exception
+// Handling.  This is supported by several Windows compilers but generally
+// does not exist on any other system.
+#ifndef GTEST_HAS_SEH
+// The user didn't tell us, so we need to figure it out.
+
+# if defined(_MSC_VER) || defined(__BORLANDC__)
+// These two compilers are known to support SEH.
+#  define GTEST_HAS_SEH 1
+# else
+// Assume no SEH.
+#  define GTEST_HAS_SEH 0
+# endif
+
+#define GTEST_IS_THREADSAFE \
+    (GTEST_HAS_MUTEX_AND_THREAD_LOCAL_ \
+     || (GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT) \
+     || GTEST_HAS_PTHREAD)
+
+#endif  // GTEST_HAS_SEH
+
+#ifdef _MSC_VER
+# if GTEST_LINKED_AS_SHARED_LIBRARY
+#  define GTEST_API_ __declspec(dllimport)
+# elif GTEST_CREATE_SHARED_LIBRARY
+#  define GTEST_API_ __declspec(dllexport)
+# endif
+#elif __GNUC__ >= 4 || defined(__clang__)
+# define GTEST_API_ __attribute__((visibility ("default")))
+#endif // _MSC_VER
+
+#ifndef GTEST_API_
+# define GTEST_API_
+#endif
+
+#ifdef __GNUC__
+// Ask the compiler to never inline a given function.
+# define GTEST_NO_INLINE_ __attribute__((noinline))
+#else
+# define GTEST_NO_INLINE_
+#endif
+
+// _LIBCPP_VERSION is defined by the libc++ library from the LLVM project.
+#if defined(__GLIBCXX__) || defined(_LIBCPP_VERSION)
+# define GTEST_HAS_CXXABI_H_ 1
+#else
+# define GTEST_HAS_CXXABI_H_ 0
+#endif
+
+// A function level attribute to disable checking for use of uninitialized
+// memory when built with MemorySanitizer.
+#if defined(__clang__)
+# if __has_feature(memory_sanitizer)
+#  define GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_ \
+       __attribute__((no_sanitize_memory))
+# else
+#  define GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_
+# endif  // __has_feature(memory_sanitizer)
+#else
+# define GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_
+#endif  // __clang__
+
+// A function level attribute to disable AddressSanitizer instrumentation.
+#if defined(__clang__)
+# if __has_feature(address_sanitizer)
+#  define GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_ \
+       __attribute__((no_sanitize_address))
+# else
+#  define GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_
+# endif  // __has_feature(address_sanitizer)
+#else
+# define GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_
+#endif  // __clang__
+
+// A function level attribute to disable ThreadSanitizer instrumentation.
+#if defined(__clang__)
+# if __has_feature(thread_sanitizer)
+#  define GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_ \
+       __attribute__((no_sanitize_thread))
+# else
+#  define GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_
+# endif  // __has_feature(thread_sanitizer)
+#else
+# define GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_
+#endif  // __clang__
+
+namespace testing {
+
+class Message;
+
+#if defined(GTEST_TUPLE_NAMESPACE_)
+// Import tuple and friends into the ::testing namespace.
+// It is part of our interface, having them in ::testing allows us to change
+// their types as needed.
+using GTEST_TUPLE_NAMESPACE_::get;
+using GTEST_TUPLE_NAMESPACE_::make_tuple;
+using GTEST_TUPLE_NAMESPACE_::tuple;
+using GTEST_TUPLE_NAMESPACE_::tuple_size;
+using GTEST_TUPLE_NAMESPACE_::tuple_element;
+#endif  // defined(GTEST_TUPLE_NAMESPACE_)
+
+namespace internal {
+
+// A secret type that Google Test users don't know about.  It has no
+// definition on purpose.  Therefore it's impossible to create a
+// Secret object, which is what we want.
+class Secret;
+
+// The GTEST_COMPILE_ASSERT_ macro can be used to verify that a compile time
+// expression is true. For example, you could use it to verify the
+// size of a static array:
+//
+//   GTEST_COMPILE_ASSERT_(GTEST_ARRAY_SIZE_(names) == NUM_NAMES,
+//                         names_incorrect_size);
+//
+// or to make sure a struct is smaller than a certain size:
+//
+//   GTEST_COMPILE_ASSERT_(sizeof(foo) < 128, foo_too_large);
+//
+// The second argument to the macro is the name of the variable. If
+// the expression is false, most compilers will issue a warning/error
+// containing the name of the variable.
+
+#if GTEST_LANG_CXX11
+# define GTEST_COMPILE_ASSERT_(expr, msg) static_assert(expr, #msg)
+#else  // !GTEST_LANG_CXX11
+template <bool>
+struct CompileAssert {
+};
+
+# define GTEST_COMPILE_ASSERT_(expr, msg) \
+  typedef ::testing::internal::CompileAssert<(static_cast<bool>(expr))> \
+      msg[static_cast<bool>(expr) ? 1 : -1] GTEST_ATTRIBUTE_UNUSED_
+#endif  // !GTEST_LANG_CXX11
+
+// Implementation details of GTEST_COMPILE_ASSERT_:
+//
+// (In C++11, we simply use static_assert instead of the following)
+//
+// - GTEST_COMPILE_ASSERT_ works by defining an array type that has -1
+//   elements (and thus is invalid) when the expression is false.
+//
+// - The simpler definition
+//
+//    #define GTEST_COMPILE_ASSERT_(expr, msg) typedef char msg[(expr) ? 1 : -1]
+//
+//   does not work, as gcc supports variable-length arrays whose sizes
+//   are determined at run-time (this is gcc's extension and not part
+//   of the C++ standard).  As a result, gcc fails to reject the
+//   following code with the simple definition:
+//
+//     int foo;
+//     GTEST_COMPILE_ASSERT_(foo, msg); // not supposed to compile as foo is
+//                                      // not a compile-time constant.
+//
+// - By using the type CompileAssert<(bool(expr))>, we ensure that
+//   expr is a compile-time constant.  (Template arguments must be
+//   determined at compile-time.)
+//
+// - The outer parentheses in CompileAssert<(bool(expr))> are necessary
+//   to work around a bug in gcc 3.4.4 and 4.0.1.  If we had written
+//
+//     CompileAssert<bool(expr)>
+//
+//   instead, these compilers will refuse to compile
+//
+//     GTEST_COMPILE_ASSERT_(5 > 0, some_message);
+//
+//   (They seem to think the ">" in "5 > 0" marks the end of the
+//   template argument list.)
+//
+// - The array size is (bool(expr) ? 1 : -1), instead of simply
+//
+//     ((expr) ? 1 : -1).
+//
+//   This is to avoid running into a bug in MS VC 7.1, which
+//   causes ((0.0) ? 1 : -1) to incorrectly evaluate to 1.
+
+// StaticAssertTypeEqHelper is used by StaticAssertTypeEq defined in gtest.h.
+//
+// This template is declared, but intentionally undefined.
+template <typename T1, typename T2>
+struct StaticAssertTypeEqHelper;
+
+template <typename T>
+struct StaticAssertTypeEqHelper<T, T> {
+  enum { value = true };
+};
+
+// Evaluates to the number of elements in 'array'.
+#define GTEST_ARRAY_SIZE_(array) (sizeof(array) / sizeof(array[0]))
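+//
+// For example (illustrative): given "int a[7];", GTEST_ARRAY_SIZE_(a)
+// evaluates to 7 at compile time.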
+
+#if GTEST_HAS_GLOBAL_STRING
+typedef ::string string;
+#else
+typedef ::std::string string;
+#endif  // GTEST_HAS_GLOBAL_STRING
+
+#if GTEST_HAS_GLOBAL_WSTRING
+typedef ::wstring wstring;
+#elif GTEST_HAS_STD_WSTRING
+typedef ::std::wstring wstring;
+#endif  // GTEST_HAS_GLOBAL_WSTRING
+
+// A helper for suppressing warnings on constant condition.  It just
+// returns 'condition'.
+GTEST_API_ bool IsTrue(bool condition);
+
+// Defines scoped_ptr.
+
+// This implementation of scoped_ptr is PARTIAL - it only contains
+// enough functionality to satisfy Google Test's needs.
+template <typename T>
+class scoped_ptr {
+ public:
+  typedef T element_type;
+
+  explicit scoped_ptr(T* p = NULL) : ptr_(p) {}
+  ~scoped_ptr() { reset(); }
+
+  T& operator*() const { return *ptr_; }
+  T* operator->() const { return ptr_; }
+  T* get() const { return ptr_; }
+
+  T* release() {
+    T* const ptr = ptr_;
+    ptr_ = NULL;
+    return ptr;
+  }
+
+  void reset(T* p = NULL) {
+    if (p != ptr_) {
+      if (IsTrue(sizeof(T) > 0)) {  // Makes sure T is a complete type.
+        delete ptr_;
+      }
+      ptr_ = p;
+    }
+  }
+
+  friend void swap(scoped_ptr& a, scoped_ptr& b) {
+    using std::swap;
+    swap(a.ptr_, b.ptr_);
+  }
+
+ private:
+  T* ptr_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(scoped_ptr);
+};
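+
+// A minimal usage sketch (illustrative only; "Foo" and DoSomething() are
+// placeholders):
+//
+//   scoped_ptr<Foo> foo(new Foo);
+//   foo->DoSomething();         // operator->() forwards to the raw pointer.
+//   Foo* raw = foo.release();   // Gives up ownership; caller must delete raw.
+//   delete raw;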
+
+// Defines RE.
+
+// A simple C++ wrapper for <regex.h>.  It uses the POSIX Extended
+// Regular Expression syntax.
+class GTEST_API_ RE {
+ public:
+  // A copy constructor is required by the Standard to initialize object
+  // references from r-values.
+  RE(const RE& other) { Init(other.pattern()); }
+
+  // Constructs an RE from a string.
+  RE(const ::std::string& regex) { Init(regex.c_str()); }  // NOLINT
+
+#if GTEST_HAS_GLOBAL_STRING
+
+  RE(const ::string& regex) { Init(regex.c_str()); }  // NOLINT
+
+#endif  // GTEST_HAS_GLOBAL_STRING
+
+  RE(const char* regex) { Init(regex); }  // NOLINT
+  ~RE();
+
+  // Returns the string representation of the regex.
+  const char* pattern() const { return pattern_; }
+
+  // FullMatch(str, re) returns true iff regular expression re matches
+  // the entire str.
+  // PartialMatch(str, re) returns true iff regular expression re
+  // matches a substring of str (including str itself).
+  //
+  // TODO(wan@google.com): make FullMatch() and PartialMatch() work
+  // when str contains NUL characters.
+  static bool FullMatch(const ::std::string& str, const RE& re) {
+    return FullMatch(str.c_str(), re);
+  }
+  static bool PartialMatch(const ::std::string& str, const RE& re) {
+    return PartialMatch(str.c_str(), re);
+  }
+
+#if GTEST_HAS_GLOBAL_STRING
+
+  static bool FullMatch(const ::string& str, const RE& re) {
+    return FullMatch(str.c_str(), re);
+  }
+  static bool PartialMatch(const ::string& str, const RE& re) {
+    return PartialMatch(str.c_str(), re);
+  }
+
+#endif  // GTEST_HAS_GLOBAL_STRING
+
+  static bool FullMatch(const char* str, const RE& re);
+  static bool PartialMatch(const char* str, const RE& re);
+
+ private:
+  void Init(const char* regex);
+
+  // We use a const char* instead of an std::string, as Google Test used to be
+  // used where std::string is not available.  TODO(wan@google.com): change to
+  // std::string.
+  const char* pattern_;
+  bool is_valid_;
+
+#if GTEST_USES_POSIX_RE
+
+  regex_t full_regex_;     // For FullMatch().
+  regex_t partial_regex_;  // For PartialMatch().
+
+#else  // GTEST_USES_SIMPLE_RE
+
+  const char* full_pattern_;  // For FullMatch();
+
+#endif
+
+  GTEST_DISALLOW_ASSIGN_(RE);
+};
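+
+// Illustrative usage (sketch; patterns use POSIX Extended RE syntax):
+//
+//   const RE re("a.*z");
+//   RE::FullMatch("abcz", re);        // true - the whole string matches.
+//   RE::PartialMatch("xxabczxx", re); // true - a substring matches.
+//   RE::FullMatch("xxabcz", re);      // false - only a substring matches.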
+
+// Formats a source file path and a line number as they would appear
+// in an error message from the compiler used to compile this code.
+GTEST_API_ ::std::string FormatFileLocation(const char* file, int line);
+
+// Formats a file location for compiler-independent XML output.
+// Although this function is not platform dependent, we put it next to
+// FormatFileLocation in order to contrast the two functions.
+GTEST_API_ ::std::string FormatCompilerIndependentFileLocation(const char* file,
+                                                               int line);
+
+// Defines logging utilities:
+//   GTEST_LOG_(severity) - logs messages at the specified severity level. The
+//                          message itself is streamed into the macro.
+//   LogToStderr()  - directs all log messages to stderr.
+//   FlushInfoLog() - flushes informational log messages.
+
+enum GTestLogSeverity {
+  GTEST_INFO,
+  GTEST_WARNING,
+  GTEST_ERROR,
+  GTEST_FATAL
+};
+
+// Formats log entry severity, provides a stream object for streaming the
+// log message, and terminates the message with a newline when going out of
+// scope.
+class GTEST_API_ GTestLog {
+ public:
+  GTestLog(GTestLogSeverity severity, const char* file, int line);
+
+  // Flushes the buffers and, if severity is GTEST_FATAL, aborts the program.
+  ~GTestLog();
+
+  ::std::ostream& GetStream() { return ::std::cerr; }
+
+ private:
+  const GTestLogSeverity severity_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestLog);
+};
+
+#if !defined(GTEST_LOG_)
+
+# define GTEST_LOG_(severity) \
+    ::testing::internal::GTestLog(::testing::internal::GTEST_##severity, \
+                                  __FILE__, __LINE__).GetStream()
+
+inline void LogToStderr() {}
+inline void FlushInfoLog() { fflush(NULL); }
+
+#endif  // !defined(GTEST_LOG_)
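+
+// Illustrative usage of GTEST_LOG_ (sketch only):
+//
+//   GTEST_LOG_(WARNING) << "Something unexpected happened.";  // Logs and
+//                                                             // continues.
+//   GTEST_LOG_(FATAL) << "Cannot continue.";  // Logs, then aborts when the
+//                                             // temporary GTestLog dies.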
+
+#if !defined(GTEST_CHECK_)
+// INTERNAL IMPLEMENTATION - DO NOT USE.
+//
+// GTEST_CHECK_ is an all-mode assert. It aborts the program if the condition
+// is not satisfied.
+//  Synopsis:
+//    GTEST_CHECK_(boolean_condition);
+//     or
+//    GTEST_CHECK_(boolean_condition) << "Additional message";
+//
+//    This checks the condition and, if the condition is not satisfied,
+//    prints a message about the condition violation, including the
+//    condition itself plus any additional message streamed into it,
+//    and then aborts the program.  It aborts regardless of whether the
+//    program is built in debug mode or not.
+# define GTEST_CHECK_(condition) \
+    GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+    if (::testing::internal::IsTrue(condition)) \
+      ; \
+    else \
+      GTEST_LOG_(FATAL) << "Condition " #condition " failed. "
+#endif  // !defined(GTEST_CHECK_)
+
+// An all-mode assert to verify that the given POSIX-style function
+// call returns 0 (indicating success).  Known limitation: this
+// doesn't expand to a balanced 'if' statement, so enclose the macro
+// in {} if you need to use it as the only statement in an 'if'
+// branch.
+#define GTEST_CHECK_POSIX_SUCCESS_(posix_call) \
+  if (const int gtest_error = (posix_call)) \
+    GTEST_LOG_(FATAL) << #posix_call << " failed with error " \
+                      << gtest_error
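+
+// For example, as used later in this header:
+//
+//   GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_init(&mutex_, NULL));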
+
+#if GTEST_HAS_STD_MOVE_
+using std::move;
+#else  // GTEST_HAS_STD_MOVE_
+template <typename T>
+const T& move(const T& t) {
+  return t;
+}
+#endif  // GTEST_HAS_STD_MOVE_
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Use ImplicitCast_ as a safe version of static_cast for upcasting in
+// the type hierarchy (e.g. casting a Foo* to a SuperclassOfFoo* or a
+// const Foo*).  When you use ImplicitCast_, the compiler checks that
+// the cast is safe.  Such explicit ImplicitCast_s are necessary in
+// surprisingly many situations where C++ demands an exact type match
+// instead of an argument type convertible to a target type.
+//
+// The syntax for using ImplicitCast_ is the same as for static_cast:
+//
+//   ImplicitCast_<ToType>(expr)
+//
+// ImplicitCast_ would have been part of the C++ standard library,
+// but the proposal was submitted too late.  It will probably make
+// its way into the language in the future.
+//
+// This relatively ugly name is intentional. It prevents clashes with
+// similar functions users may have (e.g., implicit_cast). The internal
+// namespace alone is not enough because the function can be found by ADL.
+template<typename To>
+inline To ImplicitCast_(To x) { return x; }
+
+// When you upcast (that is, cast a pointer from type Foo to type
+// SuperclassOfFoo), it's fine to use ImplicitCast_<>, since upcasts
+// always succeed.  When you downcast (that is, cast a pointer from
+// type Foo to type SubclassOfFoo), static_cast<> isn't safe, because
+// how do you know the pointer is really of type SubclassOfFoo?  It
+// could be a bare Foo, or of type DifferentSubclassOfFoo.  Thus,
+// when you downcast, you should use this macro.  In debug mode, we
+// use dynamic_cast<> to double-check the downcast is legal (we die
+// if it's not).  In normal mode, we do the efficient static_cast<>
+// instead.  Thus, it's important to test in debug mode to make sure
+// the cast is legal!
+//    This is the only place in the code we should use dynamic_cast<>.
+// In particular, you SHOULDN'T be using dynamic_cast<> in order to
+// do RTTI, e.g. code like this:
+//    if (dynamic_cast<Subclass1>(foo)) HandleASubclass1Object(foo);
+//    if (dynamic_cast<Subclass2>(foo)) HandleASubclass2Object(foo);
+// You should design the code some other way not to need this.
+//
+// This relatively ugly name is intentional. It prevents clashes with
+// similar functions users may have (e.g., down_cast). The internal
+// namespace alone is not enough because the function can be found by ADL.
+template<typename To, typename From>  // use like this: DownCast_<T*>(foo);
+inline To DownCast_(From* f) {  // so we only accept pointers
+  // Ensures that To is a sub-type of From *.  This test is here only
+  // for compile-time type checking, and has no overhead in an
+  // optimized build at run-time, as it will be optimized away
+  // completely.
+  GTEST_INTENTIONAL_CONST_COND_PUSH_()
+  if (false) {
+  GTEST_INTENTIONAL_CONST_COND_POP_()
+    const To to = NULL;
+    ::testing::internal::ImplicitCast_<From*>(to);
+  }
+
+#if GTEST_HAS_RTTI
+  // RTTI: debug mode only!
+  GTEST_CHECK_(f == NULL || dynamic_cast<To>(f) != NULL);
+#endif
+  return static_cast<To>(f);
+}
+
+// Downcasts the pointer of type Base to Derived.
+// Derived must be a subclass of Base. The parameter MUST
+// point to a class of type Derived, not any subclass of it.
+// When RTTI is available, the function performs a runtime
+// check to enforce this.
+template <class Derived, class Base>
+Derived* CheckedDowncastToActualType(Base* base) {
+#if GTEST_HAS_RTTI
+  GTEST_CHECK_(typeid(*base) == typeid(Derived));
+#endif
+
+#if GTEST_HAS_DOWNCAST_
+  return ::down_cast<Derived*>(base);
+#elif GTEST_HAS_RTTI
+  return dynamic_cast<Derived*>(base);  // NOLINT
+#else
+  return static_cast<Derived*>(base);  // Poor man's downcast.
+#endif
+}
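+
+// For example, as used later in this header (ValueHolder is the exact
+// dynamic type of *holder there):
+//
+//   CheckedDowncastToActualType<ValueHolder>(holder)->pointer();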
+
+#if GTEST_HAS_STREAM_REDIRECTION
+
+// Defines the stderr capturer:
+//   CaptureStdout     - starts capturing stdout.
+//   GetCapturedStdout - stops capturing stdout and returns the captured string.
+//   CaptureStderr     - starts capturing stderr.
+//   GetCapturedStderr - stops capturing stderr and returns the captured string.
+//
+GTEST_API_ void CaptureStdout();
+GTEST_API_ std::string GetCapturedStdout();
+GTEST_API_ void CaptureStderr();
+GTEST_API_ std::string GetCapturedStderr();
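+
+// Illustrative usage (sketch only):
+//
+//   CaptureStdout();
+//   printf("hello\n");
+//   const std::string output = GetCapturedStdout();  // output == "hello\n"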
+
+#endif  // GTEST_HAS_STREAM_REDIRECTION
+
+// Returns a path to a temporary directory.
+GTEST_API_ std::string TempDir();
+
+// Returns the size (in bytes) of a file.
+GTEST_API_ size_t GetFileSize(FILE* file);
+
+// Reads the entire content of a file as a string.
+GTEST_API_ std::string ReadEntireFile(FILE* file);
+
+// All command line arguments.
+GTEST_API_ const ::std::vector<testing::internal::string>& GetArgvs();
+
+#if GTEST_HAS_DEATH_TEST
+
+const ::std::vector<testing::internal::string>& GetInjectableArgvs();
+void SetInjectableArgvs(const ::std::vector<testing::internal::string>*
+                             new_argvs);
+
+
+#endif  // GTEST_HAS_DEATH_TEST
+
+// Defines synchronization primitives.
+#if GTEST_IS_THREADSAFE
+# if GTEST_HAS_PTHREAD
+// Sleeps for (roughly) n milliseconds.  This function is only for testing
+// Google Test's own constructs.  Don't use it in user tests, either
+// directly or indirectly.
+inline void SleepMilliseconds(int n) {
+  const timespec time = {
+    0,                  // 0 seconds.
+    n * 1000L * 1000L,  // And n ms.
+  };
+  nanosleep(&time, NULL);
+}
+# endif  // GTEST_HAS_PTHREAD
+
+# if GTEST_HAS_NOTIFICATION_
+// Notification has already been imported into the namespace.
+// Nothing to do here.
+
+# elif GTEST_HAS_PTHREAD
+// Allows a controller thread to pause execution of newly created
+// threads until notified.  Instances of this class must be created
+// and destroyed in the controller thread.
+//
+// This class is only for testing Google Test's own constructs. Do not
+// use it in user tests, either directly or indirectly.
+class Notification {
+ public:
+  Notification() : notified_(false) {
+    GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_init(&mutex_, NULL));
+  }
+  ~Notification() {
+    pthread_mutex_destroy(&mutex_);
+  }
+
+  // Notifies all threads created with this notification to start. Must
+  // be called from the controller thread.
+  void Notify() {
+    pthread_mutex_lock(&mutex_);
+    notified_ = true;
+    pthread_mutex_unlock(&mutex_);
+  }
+
+  // Blocks until the controller thread notifies. Must be called from a test
+  // thread.
+  void WaitForNotification() {
+    for (;;) {
+      pthread_mutex_lock(&mutex_);
+      const bool notified = notified_;
+      pthread_mutex_unlock(&mutex_);
+      if (notified)
+        break;
+      SleepMilliseconds(10);
+    }
+  }
+
+ private:
+  pthread_mutex_t mutex_;
+  bool notified_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(Notification);
+};
+
+# elif GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT
+
+GTEST_API_ void SleepMilliseconds(int n);
+
+// Provides leak-safe Windows kernel handle ownership.
+// Used in death tests and in threading support.
+class GTEST_API_ AutoHandle {
+ public:
+  // Assume that Win32 HANDLE type is equivalent to void*. Doing so allows us to
+  // avoid including <windows.h> in this header file. Including <windows.h> is
+  // undesirable because it defines a lot of symbols and macros that tend to
+  // conflict with client code. This assumption is verified by
+  // WindowsTypesTest.HANDLEIsVoidStar.
+  typedef void* Handle;
+  AutoHandle();
+  explicit AutoHandle(Handle handle);
+
+  ~AutoHandle();
+
+  Handle Get() const;
+  void Reset();
+  void Reset(Handle handle);
+
+ private:
+  // Returns true iff the handle is a valid handle object that can be closed.
+  bool IsCloseable() const;
+
+  Handle handle_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(AutoHandle);
+};
+
+// Allows a controller thread to pause execution of newly created
+// threads until notified.  Instances of this class must be created
+// and destroyed in the controller thread.
+//
+// This class is only for testing Google Test's own constructs. Do not
+// use it in user tests, either directly or indirectly.
+class GTEST_API_ Notification {
+ public:
+  Notification();
+  void Notify();
+  void WaitForNotification();
+
+ private:
+  AutoHandle event_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(Notification);
+};
+# endif  // GTEST_HAS_NOTIFICATION_
+
+// On MinGW, we can have both GTEST_OS_WINDOWS and GTEST_HAS_PTHREAD
+// defined, but we don't want to use MinGW's pthreads implementation, which
+// has conformance problems with some versions of the POSIX standard.
+# if GTEST_HAS_PTHREAD && !GTEST_OS_WINDOWS_MINGW
+
+// As a C-function, ThreadFuncWithCLinkage cannot be templated itself.
+// Consequently, it cannot select a correct instantiation of ThreadWithParam
+// in order to call its Run(). Introducing ThreadWithParamBase as a
+// non-templated base class for ThreadWithParam allows us to bypass this
+// problem.
+class ThreadWithParamBase {
+ public:
+  virtual ~ThreadWithParamBase() {}
+  virtual void Run() = 0;
+};
+
+// pthread_create() accepts a pointer to a function type with the C linkage.
+// According to the Standard (7.5/1), function types with different linkages
+// are different even if they are otherwise identical.  Some compilers (for
+// example, SunStudio) treat them as different types.  Since class methods
+// cannot be defined with C-linkage we need to define a free C-function to
+// pass into pthread_create().
+extern "C" inline void* ThreadFuncWithCLinkage(void* thread) {
+  static_cast<ThreadWithParamBase*>(thread)->Run();
+  return NULL;
+}
+
+// Helper class for testing Google Test's multi-threading constructs.
+// To use it, write:
+//
+//   void ThreadFunc(int param) { /* Do things with param */ }
+//   Notification thread_can_start;
+//   ...
+//   // The thread_can_start parameter is optional; you can supply NULL.
+//   ThreadWithParam<int> thread(&ThreadFunc, 5, &thread_can_start);
+//   thread_can_start.Notify();
+//
+// These classes are only for testing Google Test's own constructs. Do
+// not use them in user tests, either directly or indirectly.
+template <typename T>
+class ThreadWithParam : public ThreadWithParamBase {
+ public:
+  typedef void UserThreadFunc(T);
+
+  ThreadWithParam(UserThreadFunc* func, T param, Notification* thread_can_start)
+      : func_(func),
+        param_(param),
+        thread_can_start_(thread_can_start),
+        finished_(false) {
+    ThreadWithParamBase* const base = this;
+    // The thread can be created only after all fields except thread_
+    // have been initialized.
+    GTEST_CHECK_POSIX_SUCCESS_(
+        pthread_create(&thread_, 0, &ThreadFuncWithCLinkage, base));
+  }
+  ~ThreadWithParam() { Join(); }
+
+  void Join() {
+    if (!finished_) {
+      GTEST_CHECK_POSIX_SUCCESS_(pthread_join(thread_, 0));
+      finished_ = true;
+    }
+  }
+
+  virtual void Run() {
+    if (thread_can_start_ != NULL)
+      thread_can_start_->WaitForNotification();
+    func_(param_);
+  }
+
+ private:
+  UserThreadFunc* const func_;  // User-supplied thread function.
+  const T param_;  // User-supplied parameter to the thread function.
+  // When non-NULL, used to block execution until the controller thread
+  // notifies.
+  Notification* const thread_can_start_;
+  bool finished_;  // true iff we know that the thread function has finished.
+  pthread_t thread_;  // The native thread object.
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadWithParam);
+};
+# endif  // GTEST_HAS_PTHREAD && !GTEST_OS_WINDOWS_MINGW
+
+# if GTEST_HAS_MUTEX_AND_THREAD_LOCAL_
+// Mutex and ThreadLocal have already been imported into the namespace.
+// Nothing to do here.
+
+# elif GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT
+
+// Mutex implements mutex on Windows platforms.  It is used in conjunction
+// with class MutexLock:
+//
+//   Mutex mutex;
+//   ...
+//   MutexLock lock(&mutex);  // Acquires the mutex and releases it at the
+//                            // end of the current scope.
+//
+// A static Mutex *must* be defined or declared using one of the following
+// macros:
+//   GTEST_DEFINE_STATIC_MUTEX_(g_some_mutex);
+//   GTEST_DECLARE_STATIC_MUTEX_(g_some_mutex);
+//
+// (A non-static Mutex is defined/declared in the usual way).
+class GTEST_API_ Mutex {
+ public:
+  enum MutexType { kStatic = 0, kDynamic = 1 };
+  // We rely on kStaticMutex being 0, as that is the value the linker
+  // initializes type_ to in static mutexes.  critical_section_ will be
+  // initialized lazily in ThreadSafeLazyInit().
+  enum StaticConstructorSelector { kStaticMutex = 0 };
+
+  // This constructor intentionally does nothing.  It relies on type_ being
+  // statically initialized to 0 (effectively setting it to kStatic) and on
+  // ThreadSafeLazyInit() to lazily initialize the rest of the members.
+  explicit Mutex(StaticConstructorSelector /*dummy*/) {}
+
+  Mutex();
+  ~Mutex();
+
+  void Lock();
+
+  void Unlock();
+
+  // Does nothing if the current thread holds the mutex. Otherwise, crashes
+  // with high probability.
+  void AssertHeld();
+
+ private:
+  // Initializes owner_thread_id_ and critical_section_ in static mutexes.
+  void ThreadSafeLazyInit();
+
+  // Per http://blogs.msdn.com/b/oldnewthing/archive/2004/02/23/78395.aspx,
+  // we assume that 0 is an invalid value for thread IDs.
+  unsigned int owner_thread_id_;
+
+  // For static mutexes, we rely on these members being initialized to zeros
+  // by the linker.
+  MutexType type_;
+  long critical_section_init_phase_;  // NOLINT
+  GTEST_CRITICAL_SECTION* critical_section_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(Mutex);
+};
+
+# define GTEST_DECLARE_STATIC_MUTEX_(mutex) \
+    extern ::testing::internal::Mutex mutex
+
+# define GTEST_DEFINE_STATIC_MUTEX_(mutex) \
+    ::testing::internal::Mutex mutex(::testing::internal::Mutex::kStaticMutex)
+
+// We cannot name this class MutexLock because the ctor declaration would
+// conflict with a macro named MutexLock, which is defined on some
+// platforms. That macro is used as a defensive measure to prevent
+// inadvertent misuses of MutexLock like "MutexLock(&mu)" rather than
+// "MutexLock l(&mu)".  Hence the typedef trick below.
+class GTestMutexLock {
+ public:
+  explicit GTestMutexLock(Mutex* mutex)
+      : mutex_(mutex) { mutex_->Lock(); }
+
+  ~GTestMutexLock() { mutex_->Unlock(); }
+
+ private:
+  Mutex* const mutex_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestMutexLock);
+};
+
+typedef GTestMutexLock MutexLock;
+
+// Base class for ValueHolder<T>.  Allows a caller to hold and delete a value
+// without knowing its type.
+class ThreadLocalValueHolderBase {
+ public:
+  virtual ~ThreadLocalValueHolderBase() {}
+};
+
+// Provides a way for a thread to send notifications to a ThreadLocal
+// regardless of its parameter type.
+class ThreadLocalBase {
+ public:
+  // Creates a new ValueHolder<T> object holding a default value passed to
+  // this ThreadLocal<T>'s constructor and returns it.  It is the caller's
+  // responsibility not to call this when the ThreadLocal<T> instance already
+  // has a value on the current thread.
+  virtual ThreadLocalValueHolderBase* NewValueForCurrentThread() const = 0;
+
+ protected:
+  ThreadLocalBase() {}
+  virtual ~ThreadLocalBase() {}
+
+ private:
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadLocalBase);
+};
+
+// Maps a thread to a set of ThreadLocals that have values instantiated on that
+// thread and notifies them when the thread exits.  A ThreadLocal instance is
+// expected to persist until all threads it has values on have terminated.
+class GTEST_API_ ThreadLocalRegistry {
+ public:
+  // Registers thread_local_instance as having value on the current thread.
+  // Returns a value that can be used to identify the thread from other threads.
+  static ThreadLocalValueHolderBase* GetValueOnCurrentThread(
+      const ThreadLocalBase* thread_local_instance);
+
+  // Invoked when a ThreadLocal instance is destroyed.
+  static void OnThreadLocalDestroyed(
+      const ThreadLocalBase* thread_local_instance);
+};
+
+class GTEST_API_ ThreadWithParamBase {
+ public:
+  void Join();
+
+ protected:
+  class Runnable {
+   public:
+    virtual ~Runnable() {}
+    virtual void Run() = 0;
+  };
+
+  ThreadWithParamBase(Runnable *runnable, Notification* thread_can_start);
+  virtual ~ThreadWithParamBase();
+
+ private:
+  AutoHandle thread_;
+};
+
+// Helper class for testing Google Test's multi-threading constructs.
+template <typename T>
+class ThreadWithParam : public ThreadWithParamBase {
+ public:
+  typedef void UserThreadFunc(T);
+
+  ThreadWithParam(UserThreadFunc* func, T param, Notification* thread_can_start)
+      : ThreadWithParamBase(new RunnableImpl(func, param), thread_can_start) {
+  }
+  virtual ~ThreadWithParam() {}
+
+ private:
+  class RunnableImpl : public Runnable {
+   public:
+    RunnableImpl(UserThreadFunc* func, T param)
+        : func_(func),
+          param_(param) {
+    }
+    virtual ~RunnableImpl() {}
+    virtual void Run() {
+      func_(param_);
+    }
+
+   private:
+    UserThreadFunc* const func_;
+    const T param_;
+
+    GTEST_DISALLOW_COPY_AND_ASSIGN_(RunnableImpl);
+  };
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadWithParam);
+};
+
+// Implements thread-local storage on Windows systems.
+//
+//   // Thread 1
+//   ThreadLocal<int> tl(100);  // 100 is the default value for each thread.
+//
+//   // Thread 2
+//   tl.set(150);  // Changes the value for thread 2 only.
+//   EXPECT_EQ(150, tl.get());
+//
+//   // Thread 1
+//   EXPECT_EQ(100, tl.get());  // In thread 1, tl has the original value.
+//   tl.set(200);
+//   EXPECT_EQ(200, tl.get());
+//
+// The template type argument T must have a public copy constructor.
+// In addition, the default ThreadLocal constructor requires T to have
+// a public default constructor.
+//
+// The users of a ThreadLocal instance have to make sure that all but one
+// of the threads (including the main one) using that instance have exited
+// before destroying it. Otherwise, the per-thread objects managed for them
+// by the ThreadLocal instance are not guaranteed to be destroyed on all
+// platforms.
+//
+// Google Test only uses global ThreadLocal objects.  That means they
+// will die after main() has returned.  Therefore, no per-thread
+// object managed by Google Test will be leaked as long as all threads
+// using Google Test have exited when main() returns.
+template <typename T>
+class ThreadLocal : public ThreadLocalBase {
+ public:
+  ThreadLocal() : default_factory_(new DefaultValueHolderFactory()) {}
+  explicit ThreadLocal(const T& value)
+      : default_factory_(new InstanceValueHolderFactory(value)) {}
+
+  ~ThreadLocal() { ThreadLocalRegistry::OnThreadLocalDestroyed(this); }
+
+  T* pointer() { return GetOrCreateValue(); }
+  const T* pointer() const { return GetOrCreateValue(); }
+  const T& get() const { return *pointer(); }
+  void set(const T& value) { *pointer() = value; }
+
+ private:
+  // Holds a value of T.  Can be deleted via its base class without the caller
+  // knowing the type of T.
+  class ValueHolder : public ThreadLocalValueHolderBase {
+   public:
+    ValueHolder() : value_() {}
+    explicit ValueHolder(const T& value) : value_(value) {}
+
+    T* pointer() { return &value_; }
+
+   private:
+    T value_;
+    GTEST_DISALLOW_COPY_AND_ASSIGN_(ValueHolder);
+  };
+
+
+  T* GetOrCreateValue() const {
+    return static_cast<ValueHolder*>(
+        ThreadLocalRegistry::GetValueOnCurrentThread(this))->pointer();
+  }
+
+  virtual ThreadLocalValueHolderBase* NewValueForCurrentThread() const {
+    return default_factory_->MakeNewHolder();
+  }
+
+  class ValueHolderFactory {
+   public:
+    ValueHolderFactory() {}
+    virtual ~ValueHolderFactory() {}
+    virtual ValueHolder* MakeNewHolder() const = 0;
+
+   private:
+    GTEST_DISALLOW_COPY_AND_ASSIGN_(ValueHolderFactory);
+  };
+
+  class DefaultValueHolderFactory : public ValueHolderFactory {
+   public:
+    DefaultValueHolderFactory() {}
+    virtual ValueHolder* MakeNewHolder() const { return new ValueHolder(); }
+
+   private:
+    GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultValueHolderFactory);
+  };
+
+  class InstanceValueHolderFactory : public ValueHolderFactory {
+   public:
+    explicit InstanceValueHolderFactory(const T& value) : value_(value) {}
+    virtual ValueHolder* MakeNewHolder() const {
+      return new ValueHolder(value_);
+    }
+
+   private:
+    const T value_;  // The value for each thread.
+
+    GTEST_DISALLOW_COPY_AND_ASSIGN_(InstanceValueHolderFactory);
+  };
+
+  scoped_ptr<ValueHolderFactory> default_factory_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadLocal);
+};
+
+# elif GTEST_HAS_PTHREAD
+
+// MutexBase and Mutex implement mutex on pthreads-based platforms.
+class MutexBase {
+ public:
+  // Acquires this mutex.
+  void Lock() {
+    GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_lock(&mutex_));
+    owner_ = pthread_self();
+    has_owner_ = true;
+  }
+
+  // Releases this mutex.
+  void Unlock() {
+    // Since the lock is being released the owner_ field should no longer be
+    // considered valid. We don't protect writing to has_owner_ here, as it's
+    // the caller's responsibility to ensure that the current thread holds the
+    // mutex when this is called.
+    has_owner_ = false;
+    GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_unlock(&mutex_));
+  }
+
+  // Does nothing if the current thread holds the mutex. Otherwise, crashes
+  // with high probability.
+  void AssertHeld() const {
+    GTEST_CHECK_(has_owner_ && pthread_equal(owner_, pthread_self()))
+        << "The current thread is not holding the mutex @" << this;
+  }
+
+  // A static mutex may be used before main() is entered.  It may even
+  // be used before the dynamic initialization stage.  Therefore we
+  // must be able to initialize a static mutex object at link time.
+  // This means MutexBase has to be a POD and its member variables
+  // have to be public.
+ public:
+  pthread_mutex_t mutex_;  // The underlying pthread mutex.
+  // has_owner_ indicates whether the owner_ field below contains a valid thread
+  // ID and is therefore safe to inspect (e.g., to use in pthread_equal()). All
+  // accesses to the owner_ field should be protected by a check of this field.
+  // An alternative might be to memset() owner_ to all zeros, but there's no
+  // guarantee that a zero'd pthread_t is necessarily invalid or even different
+  // from pthread_self().
+  bool has_owner_;
+  pthread_t owner_;  // The thread holding the mutex.
+};
+
+// Forward-declares a static mutex.
+#  define GTEST_DECLARE_STATIC_MUTEX_(mutex) \
+     extern ::testing::internal::MutexBase mutex
+
+// Defines and statically (i.e. at link time) initializes a static mutex.
+#  define GTEST_DEFINE_STATIC_MUTEX_(mutex) \
+     ::testing::internal::MutexBase mutex = { PTHREAD_MUTEX_INITIALIZER, false, pthread_t() }
+
+// The Mutex class can only be used for mutexes created at runtime. It
+// shares its API with MutexBase otherwise.
+class Mutex : public MutexBase {
+ public:
+  Mutex() {
+    GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_init(&mutex_, NULL));
+    has_owner_ = false;
+  }
+  ~Mutex() {
+    GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_destroy(&mutex_));
+  }
+
+ private:
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(Mutex);
+};
+
+// We cannot name this class MutexLock because the ctor declaration would
+// conflict with a macro named MutexLock, which is defined on some
+// platforms. That macro is used as a defensive measure to prevent
+// inadvertent misuses of MutexLock like "MutexLock(&mu)" rather than
+// "MutexLock l(&mu)".  Hence the typedef trick below.
+class GTestMutexLock {
+ public:
+  explicit GTestMutexLock(MutexBase* mutex)
+      : mutex_(mutex) { mutex_->Lock(); }
+
+  ~GTestMutexLock() { mutex_->Unlock(); }
+
+ private:
+  MutexBase* const mutex_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestMutexLock);
+};
+
+typedef GTestMutexLock MutexLock;
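+
+// Illustrative usage (sketch; g_example_mutex is a placeholder name):
+//
+//   GTEST_DEFINE_STATIC_MUTEX_(g_example_mutex);
+//   ...
+//   {
+//     MutexLock lock(&g_example_mutex);  // Locks; unlocks at end of scope.
+//     // Critical section.
+//   }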
+
+// Helpers for ThreadLocal.
+
+// pthread_key_create() requires DeleteThreadLocalValue() to have
+// C-linkage.  Therefore it cannot be templatized to access
+// ThreadLocal<T>.  Hence the need for class
+// ThreadLocalValueHolderBase.
+class ThreadLocalValueHolderBase {
+ public:
+  virtual ~ThreadLocalValueHolderBase() {}
+};
+
+// Called by pthread to delete thread-local data stored by
+// pthread_setspecific().
+extern "C" inline void DeleteThreadLocalValue(void* value_holder) {
+  delete static_cast<ThreadLocalValueHolderBase*>(value_holder);
+}
+
+// Implements thread-local storage on pthreads-based systems.
+template <typename T>
+class ThreadLocal {
+ public:
+  ThreadLocal()
+      : key_(CreateKey()), default_factory_(new DefaultValueHolderFactory()) {}
+  explicit ThreadLocal(const T& value)
+      : key_(CreateKey()),
+        default_factory_(new InstanceValueHolderFactory(value)) {}
+
+  ~ThreadLocal() {
+    // Destroys the managed object for the current thread, if any.
+    DeleteThreadLocalValue(pthread_getspecific(key_));
+
+    // Releases resources associated with the key.  This will *not*
+    // delete managed objects for other threads.
+    GTEST_CHECK_POSIX_SUCCESS_(pthread_key_delete(key_));
+  }
+
+  T* pointer() { return GetOrCreateValue(); }
+  const T* pointer() const { return GetOrCreateValue(); }
+  const T& get() const { return *pointer(); }
+  void set(const T& value) { *pointer() = value; }
+
+ private:
+  // Holds a value of type T.
+  class ValueHolder : public ThreadLocalValueHolderBase {
+   public:
+    ValueHolder() : value_() {}
+    explicit ValueHolder(const T& value) : value_(value) {}
+
+    T* pointer() { return &value_; }
+
+   private:
+    T value_;
+    GTEST_DISALLOW_COPY_AND_ASSIGN_(ValueHolder);
+  };
+
+  static pthread_key_t CreateKey() {
+    pthread_key_t key;
+    // When a thread exits, DeleteThreadLocalValue() will be called on
+    // the object managed for that thread.
+    GTEST_CHECK_POSIX_SUCCESS_(
+        pthread_key_create(&key, &DeleteThreadLocalValue));
+    return key;
+  }
+
+  T* GetOrCreateValue() const {
+    ThreadLocalValueHolderBase* const holder =
+        static_cast<ThreadLocalValueHolderBase*>(pthread_getspecific(key_));
+    if (holder != NULL) {
+      return CheckedDowncastToActualType<ValueHolder>(holder)->pointer();
+    }
+
+    ValueHolder* const new_holder = default_factory_->MakeNewHolder();
+    ThreadLocalValueHolderBase* const holder_base = new_holder;
+    GTEST_CHECK_POSIX_SUCCESS_(pthread_setspecific(key_, holder_base));
+    return new_holder->pointer();
+  }
+
+  class ValueHolderFactory {
+   public:
+    ValueHolderFactory() {}
+    virtual ~ValueHolderFactory() {}
+    virtual ValueHolder* MakeNewHolder() const = 0;
+
+   private:
+    GTEST_DISALLOW_COPY_AND_ASSIGN_(ValueHolderFactory);
+  };
+
+  class DefaultValueHolderFactory : public ValueHolderFactory {
+   public:
+    DefaultValueHolderFactory() {}
+    virtual ValueHolder* MakeNewHolder() const { return new ValueHolder(); }
+
+   private:
+    GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultValueHolderFactory);
+  };
+
+  class InstanceValueHolderFactory : public ValueHolderFactory {
+   public:
+    explicit InstanceValueHolderFactory(const T& value) : value_(value) {}
+    virtual ValueHolder* MakeNewHolder() const {
+      return new ValueHolder(value_);
+    }
+
+   private:
+    const T value_;  // The value for each thread.
+
+    GTEST_DISALLOW_COPY_AND_ASSIGN_(InstanceValueHolderFactory);
+  };
+
+  // A key pthreads uses for looking up per-thread values.
+  const pthread_key_t key_;
+  scoped_ptr<ValueHolderFactory> default_factory_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadLocal);
+};
+
+# endif  // GTEST_HAS_MUTEX_AND_THREAD_LOCAL_
+
+#else  // GTEST_IS_THREADSAFE
+
+// A dummy implementation of synchronization primitives (mutex, lock,
+// and thread-local variable).  Necessary for compiling Google Test where
+// mutex is not supported - using Google Test in multiple threads is not
+// supported on such platforms.
+
+class Mutex {
+ public:
+  Mutex() {}
+  void Lock() {}
+  void Unlock() {}
+  void AssertHeld() const {}
+};
+
+# define GTEST_DECLARE_STATIC_MUTEX_(mutex) \
+  extern ::testing::internal::Mutex mutex
+
+# define GTEST_DEFINE_STATIC_MUTEX_(mutex) ::testing::internal::Mutex mutex
+
+// We cannot name this class MutexLock because the ctor declaration would
+// conflict with a macro named MutexLock, which is defined on some
+// platforms. That macro is used as a defensive measure to prevent against
+// inadvertent misuses of MutexLock like "MutexLock(&mu)" rather than
+// "MutexLock l(&mu)".  Hence the typedef trick below.
+class GTestMutexLock {
+ public:
+  explicit GTestMutexLock(Mutex*) {}  // NOLINT
+};
+
+typedef GTestMutexLock MutexLock;
+
+template <typename T>
+class ThreadLocal {
+ public:
+  ThreadLocal() : value_() {}
+  explicit ThreadLocal(const T& value) : value_(value) {}
+  T* pointer() { return &value_; }
+  const T* pointer() const { return &value_; }
+  const T& get() const { return value_; }
+  void set(const T& value) { value_ = value; }
+ private:
+  T value_;
+};
+
+#endif  // GTEST_IS_THREADSAFE
+
+// Returns the number of threads running in the process, or 0 to indicate that
+// we cannot detect it.
+GTEST_API_ size_t GetThreadCount();
+
+// Passing non-POD classes through ellipsis (...) crashes the ARM
+// compiler and generates a warning in Sun Studio.  The Nokia Symbian
+// and the IBM XL C/C++ compiler try to instantiate a copy constructor
+// for objects passed through ellipsis (...), failing for uncopyable
+// objects.  We define this to ensure that only POD is passed through
+// ellipsis on these systems.
+#if defined(__SYMBIAN32__) || defined(__IBMCPP__) || defined(__SUNPRO_CC)
+// We lose support for NULL detection where the compiler doesn't like
+// passing non-POD classes through ellipsis (...).
+# define GTEST_ELLIPSIS_NEEDS_POD_ 1
+#else
+# define GTEST_CAN_COMPARE_NULL 1
+#endif
+
+// The Nokia Symbian and IBM XL C/C++ compilers cannot decide between
+// const T& and const T* in a function template.  These compilers
+// _can_ decide between class template specializations for T and T*,
+// so a tr1::type_traits-like is_pointer works.
+#if defined(__SYMBIAN32__) || defined(__IBMCPP__)
+# define GTEST_NEEDS_IS_POINTER_ 1
+#endif
+
+template <bool bool_value>
+struct bool_constant {
+  typedef bool_constant<bool_value> type;
+  static const bool value = bool_value;
+};
+template <bool bool_value> const bool bool_constant<bool_value>::value;
+
+typedef bool_constant<false> false_type;
+typedef bool_constant<true> true_type;
+
+template <typename T>
+struct is_pointer : public false_type {};
+
+template <typename T>
+struct is_pointer<T*> : public true_type {};
+
+template <typename Iterator>
+struct IteratorTraits {
+  typedef typename Iterator::value_type value_type;
+};
+
+template <typename T>
+struct IteratorTraits<T*> {
+  typedef T value_type;
+};
+
+template <typename T>
+struct IteratorTraits<const T*> {
+  typedef T value_type;
+};
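+
+// Illustrative compile-time behavior of the traits above (sketch only):
+//
+//   is_pointer<int*>::value                                  // true
+//   is_pointer<int>::value                                   // false
+//   IteratorTraits<const char*>::value_type                  // char
+//   IteratorTraits<std::vector<int>::iterator>::value_type   // int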
+
+#if GTEST_OS_WINDOWS
+# define GTEST_PATH_SEP_ "\\"
+# define GTEST_HAS_ALT_PATH_SEP_ 1
+// The biggest signed integer type the compiler supports.
+typedef __int64 BiggestInt;
+#else
+# define GTEST_PATH_SEP_ "/"
+# define GTEST_HAS_ALT_PATH_SEP_ 0
+typedef long long BiggestInt;  // NOLINT
+#endif  // GTEST_OS_WINDOWS
+
+// Utilities for char.
+
+// isspace(int ch) and friends accept an unsigned char or EOF.  char
+// may be signed, depending on the compiler (or compiler flags).
+// Therefore we need to cast a char to unsigned char before calling
+// isspace(), etc.
+
+inline bool IsAlpha(char ch) {
+  return isalpha(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsAlNum(char ch) {
+  return isalnum(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsDigit(char ch) {
+  return isdigit(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsLower(char ch) {
+  return islower(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsSpace(char ch) {
+  return isspace(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsUpper(char ch) {
+  return isupper(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsXDigit(char ch) {
+  return isxdigit(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsXDigit(wchar_t ch) {
+  const unsigned char low_byte = static_cast<unsigned char>(ch);
+  return ch == low_byte && isxdigit(low_byte) != 0;
+}
+
+inline char ToLower(char ch) {
+  return static_cast<char>(tolower(static_cast<unsigned char>(ch)));
+}
+inline char ToUpper(char ch) {
+  return static_cast<char>(toupper(static_cast<unsigned char>(ch)));
+}
+
+inline std::string StripTrailingSpaces(std::string str) {
+  std::string::iterator it = str.end();
+  while (it != str.begin() && IsSpace(*--it))
+    it = str.erase(it);
+  return str;
+}
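+
+// For example (sketch): StripTrailingSpaces("abc  \t") returns "abc", while
+// leading whitespace is left untouched: StripTrailingSpaces("  abc ")
+// returns "  abc".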
+
+// The testing::internal::posix namespace holds wrappers for common
+// POSIX functions.  These wrappers hide the differences between
+// Windows/MSVC and POSIX systems.  Since some compilers define these
+// standard functions as macros, the wrapper cannot have the same name
+// as the wrapped function.
+
+namespace posix {
+
+// Functions with a different name on Windows.
+
+#if GTEST_OS_WINDOWS
+
+typedef struct _stat StatStruct;
+
+# ifdef __BORLANDC__
+inline int IsATTY(int fd) { return isatty(fd); }
+inline int StrCaseCmp(const char* s1, const char* s2) {
+  return stricmp(s1, s2);
+}
+inline char* StrDup(const char* src) { return strdup(src); }
+# else  // !__BORLANDC__
+#  if GTEST_OS_WINDOWS_MOBILE
+inline int IsATTY(int /* fd */) { return 0; }
+#  else
+inline int IsATTY(int fd) { return _isatty(fd); }
+#  endif  // GTEST_OS_WINDOWS_MOBILE
+inline int StrCaseCmp(const char* s1, const char* s2) {
+  return _stricmp(s1, s2);
+}
+inline char* StrDup(const char* src) { return _strdup(src); }
+# endif  // __BORLANDC__
+
+# if GTEST_OS_WINDOWS_MOBILE
+inline int FileNo(FILE* file) { return reinterpret_cast<int>(_fileno(file)); }
+// Stat(), RmDir(), and IsDir() are not needed on Windows CE at this
+// time and thus not defined there.
+# else
+inline int FileNo(FILE* file) { return _fileno(file); }
+inline int Stat(const char* path, StatStruct* buf) { return _stat(path, buf); }
+inline int RmDir(const char* dir) { return _rmdir(dir); }
+inline bool IsDir(const StatStruct& st) {
+  return (_S_IFDIR & st.st_mode) != 0;
+}
+# endif  // GTEST_OS_WINDOWS_MOBILE
+
+#else
+
+typedef struct stat StatStruct;
+
+inline int FileNo(FILE* file) { return fileno(file); }
+inline int IsATTY(int fd) { return isatty(fd); }
+inline int Stat(const char* path, StatStruct* buf) { return stat(path, buf); }
+inline int StrCaseCmp(const char* s1, const char* s2) {
+  return strcasecmp(s1, s2);
+}
+inline char* StrDup(const char* src) { return strdup(src); }
+inline int RmDir(const char* dir) { return rmdir(dir); }
+inline bool IsDir(const StatStruct& st) { return S_ISDIR(st.st_mode); }
+
+#endif  // GTEST_OS_WINDOWS
+
+// Functions deprecated by MSVC 8.0.
+
+GTEST_DISABLE_MSC_WARNINGS_PUSH_(4996 /* deprecated function */)
+
+inline const char* StrNCpy(char* dest, const char* src, size_t n) {
+  return strncpy(dest, src, n);
+}
+
+// ChDir(), FReopen(), FDOpen(), Read(), Write(), Close(), and
+// StrError() aren't needed on Windows CE at this time and thus not
+// defined there.
+
+#if !GTEST_OS_WINDOWS_MOBILE && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT
+inline int ChDir(const char* dir) { return chdir(dir); }
+#endif
+inline FILE* FOpen(const char* path, const char* mode) {
+  return fopen(path, mode);
+}
+#if !GTEST_OS_WINDOWS_MOBILE
+inline FILE *FReopen(const char* path, const char* mode, FILE* stream) {
+  return freopen(path, mode, stream);
+}
+inline FILE* FDOpen(int fd, const char* mode) { return fdopen(fd, mode); }
+#endif
+inline int FClose(FILE* fp) { return fclose(fp); }
+#if !GTEST_OS_WINDOWS_MOBILE
+inline int Read(int fd, void* buf, unsigned int count) {
+  return static_cast<int>(read(fd, buf, count));
+}
+inline int Write(int fd, const void* buf, unsigned int count) {
+  return static_cast<int>(write(fd, buf, count));
+}
+inline int Close(int fd) { return close(fd); }
+inline const char* StrError(int errnum) { return strerror(errnum); }
+#endif
+inline const char* GetEnv(const char* name) {
+#if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_WINDOWS_PHONE || GTEST_OS_WINDOWS_RT
+  // We are on Windows CE, which has no environment variables.
+  static_cast<void>(name);  // To prevent 'unused argument' warning.
+  return NULL;
+#elif defined(__BORLANDC__) || defined(__SunOS_5_8) || defined(__SunOS_5_9)
+  // Environment variables which we programmatically clear will be set to the
+  // empty string rather than unset (NULL).  Handle that case.
+  const char* const env = getenv(name);
+  return (env != NULL && env[0] != '\0') ? env : NULL;
+#else
+  return getenv(name);
+#endif
+}
+
+GTEST_DISABLE_MSC_WARNINGS_POP_()
+
+#if GTEST_OS_WINDOWS_MOBILE
+// Windows CE has no C library. The abort() function is used in
+// several places in Google Test. This implementation provides a reasonable
+// imitation of standard behaviour.
+void Abort();
+#else
+inline void Abort() { abort(); }
+#endif  // GTEST_OS_WINDOWS_MOBILE
+
+}  // namespace posix
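+
+// Illustrative use of the posix wrappers (sketch; "example.txt" is a
+// placeholder path):
+//
+//   FILE* f = posix::FOpen("example.txt", "r");
+//   if (f != NULL) {
+//     posix::FClose(f);
+//   }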
+
+// MSVC "deprecates" snprintf and issues warnings wherever it is used.  In
+// order to avoid these warnings, we need to use _snprintf or _snprintf_s on
+// MSVC-based platforms.  We map the GTEST_SNPRINTF_ macro to the appropriate
+// function in order to achieve that.  We use macro definition here because
+// snprintf is a variadic function.
+#if _MSC_VER >= 1400 && !GTEST_OS_WINDOWS_MOBILE
+// MSVC 2005 and above support variadic macros.
+# define GTEST_SNPRINTF_(buffer, size, format, ...) \
+     _snprintf_s(buffer, size, size, format, __VA_ARGS__)
+#elif defined(_MSC_VER)
+// Windows CE does not define _snprintf_s and MSVC prior to 2005 doesn't
+// complain about _snprintf.
+# define GTEST_SNPRINTF_ _snprintf
+#else
+# define GTEST_SNPRINTF_ snprintf
+#endif
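+
+// Illustrative usage (sketch only):
+//
+//   char buffer[32];
+//   GTEST_SNPRINTF_(buffer, sizeof(buffer), "%d != %d", 1, 2);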
+
+// The maximum number a BiggestInt can represent.  This definition
+// works no matter whether BiggestInt is represented in one's
+// complement or two's complement.
+//
+// We cannot rely on numeric_limits in STL, as __int64 and long long
+// are not part of standard C++ and numeric_limits doesn't need to be
+// defined for them.
+const BiggestInt kMaxBiggestInt =
+    ~(static_cast<BiggestInt>(1) << (8*sizeof(BiggestInt) - 1));
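+
+// For instance, on a platform where BiggestInt is 64 bits wide this
+// evaluates to 0x7FFFFFFFFFFFFFFF, i.e. 2^63 - 1.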
+
+// This template class serves as a compile-time function from size to
+// type.  It maps a size in bytes to a primitive type with that
+// size. e.g.
+//
+//   TypeWithSize<4>::UInt
+//
+// is typedef-ed to be unsigned int (unsigned integer made up of 4
+// bytes).
+//
+// Such functionality should belong to STL, but I cannot find it
+// there.
+//
+// Google Test uses this class in the implementation of floating-point
+// comparison.
+//
+// For now it only handles UInt (unsigned int) as that's all Google Test
+// needs.  Other types can be easily added in the future if need
+// arises.
+template <size_t size>
+class TypeWithSize {
+ public:
+  // This prevents the user from using TypeWithSize<N> with incorrect
+  // values of N.
+  typedef void UInt;
+};
+
+// The specialization for size 4.
+template <>
+class TypeWithSize<4> {
+ public:
+  // unsigned int has size 4 in both gcc and MSVC.
+  //
+  // As base/basictypes.h doesn't compile on Windows, we cannot use
+  // uint32, uint64, etc. here.
+  typedef int Int;
+  typedef unsigned int UInt;
+};
+
+// The specialization for size 8.
+template <>
+class TypeWithSize<8> {
+ public:
+#if GTEST_OS_WINDOWS
+  typedef __int64 Int;
+  typedef unsigned __int64 UInt;
+#else
+  typedef long long Int;  // NOLINT
+  typedef unsigned long long UInt;  // NOLINT
+#endif  // GTEST_OS_WINDOWS
+};
+
+// Integer types of known sizes.
+typedef TypeWithSize<4>::Int Int32;
+typedef TypeWithSize<4>::UInt UInt32;
+typedef TypeWithSize<8>::Int Int64;
+typedef TypeWithSize<8>::UInt UInt64;
+typedef TypeWithSize<8>::Int TimeInMillis;  // Represents time in milliseconds.
+
+// Utilities for command line flags and environment variables.
+
+// Macro for referencing flags.
+#if !defined(GTEST_FLAG)
+# define GTEST_FLAG(name) FLAGS_gtest_##name
+#endif  // !defined(GTEST_FLAG)
+
+#if !defined(GTEST_USE_OWN_FLAGFILE_FLAG_)
+# define GTEST_USE_OWN_FLAGFILE_FLAG_ 1
+#endif  // !defined(GTEST_USE_OWN_FLAGFILE_FLAG_)
+
+#if !defined(GTEST_DECLARE_bool_)
+# define GTEST_FLAG_SAVER_ ::testing::internal::GTestFlagSaver
+
+// Macros for declaring flags.
+# define GTEST_DECLARE_bool_(name) GTEST_API_ extern bool GTEST_FLAG(name)
+# define GTEST_DECLARE_int32_(name) \
+    GTEST_API_ extern ::testing::internal::Int32 GTEST_FLAG(name)
+# define GTEST_DECLARE_string_(name) \
+    GTEST_API_ extern ::std::string GTEST_FLAG(name)
+
+// Macros for defining flags.
+# define GTEST_DEFINE_bool_(name, default_val, doc) \
+    GTEST_API_ bool GTEST_FLAG(name) = (default_val)
+# define GTEST_DEFINE_int32_(name, default_val, doc) \
+    GTEST_API_ ::testing::internal::Int32 GTEST_FLAG(name) = (default_val)
+# define GTEST_DEFINE_string_(name, default_val, doc) \
+    GTEST_API_ ::std::string GTEST_FLAG(name) = (default_val)
+
+#endif  // !defined(GTEST_DECLARE_bool_)
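+
+// Illustrative usage (sketch; "list_tests" stands in for any flag name):
+//
+//   GTEST_DECLARE_bool_(list_tests);               // Declaration in a header.
+//   GTEST_DEFINE_bool_(list_tests, false, "doc");  // Definition in a .cc file.
+//   if (GTEST_FLAG(list_tests)) { ... }  // Expands to FLAGS_gtest_list_tests.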
+
+// Thread annotations
+#if !defined(GTEST_EXCLUSIVE_LOCK_REQUIRED_)
+# define GTEST_EXCLUSIVE_LOCK_REQUIRED_(locks)
+# define GTEST_LOCK_EXCLUDED_(locks)
+#endif  // !defined(GTEST_EXCLUSIVE_LOCK_REQUIRED_)
+
+// Parses 'str' for a 32-bit signed integer.  If successful, writes the result
+// to *value and returns true; otherwise leaves *value unchanged and returns
+// false.
+// TODO(chandlerc): Find a better way to refactor flag and environment parsing
+// out of both gtest-port.cc and gtest.cc to avoid exporting this utility
+// function.
+bool ParseInt32(const Message& src_text, const char* str, Int32* value);
+
+// Parses a bool/Int32/string from the environment variable
+// corresponding to the given Google Test flag.
+bool BoolFromGTestEnv(const char* flag, bool default_val);
+GTEST_API_ Int32 Int32FromGTestEnv(const char* flag, Int32 default_val);
+std::string StringFromGTestEnv(const char* flag, const char* default_val);
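+
+// For example (sketch): BoolFromGTestEnv("catch_exceptions", true) returns
+// the value of the corresponding environment variable (conventionally
+// GTEST_CATCH_EXCEPTIONS) if it is set, and the default otherwise.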
+
+}  // namespace internal
+}  // namespace testing
+
+#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_
+
+#if GTEST_OS_LINUX
+# include <stdlib.h>
+# include <sys/types.h>
+# include <sys/wait.h>
+# include <unistd.h>
+#endif  // GTEST_OS_LINUX
+
+#if GTEST_HAS_EXCEPTIONS
+# include <stdexcept>
+#endif
+
+#include <ctype.h>
+#include <float.h>
+#include <string.h>
+#include <iomanip>
+#include <limits>
+#include <map>
+#include <set>
+#include <string>
+#include <vector>
+
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file defines the Message class.
+//
+// IMPORTANT NOTE: Due to a limitation of the C++ language, we have to
+// leave some internal implementation details in this header file.
+// They are clearly marked by comments like this:
+//
+//   // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+//
+// Such code is NOT meant to be used by a user directly, and is subject
+// to CHANGE WITHOUT NOTICE.  Therefore DO NOT DEPEND ON IT in a user
+// program!
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_
+#define GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_
+
+#include <limits>
+
+
+// Ensures that there is at least one operator<< in the global namespace.
+// See Message& operator<<(...) below for why.
+void operator<<(const testing::internal::Secret&, int);
+
+namespace testing {
+
+// The Message class works like an ostream repeater.
+//
+// Typical usage:
+//
+//   1. You stream a bunch of values to a Message object.
+//      It will remember the text in a stringstream.
+//   2. Then you stream the Message object to an ostream.
+//      This causes the text in the Message to be streamed
+//      to the ostream.
+//
+// For example,
+//
+//   testing::Message foo;
+//   foo << 1 << " != " << 2;
+//   std::cout << foo;
+//
+// will print "1 != 2".
+//
+// Message is not intended to be inherited from.  In particular, its
+// destructor is not virtual.
+//
+// Note that stringstream behaves differently in gcc and in MSVC.  You
+// can stream a NULL char pointer to it in the former, but not in the
+// latter (it causes an access violation if you do).  The Message
+// class hides this difference by treating a NULL char pointer as
+// "(null)".
+class GTEST_API_ Message {
+ private:
+  // The type of basic IO manipulators (endl, ends, and flush) for
+  // narrow streams.
+  typedef std::ostream& (*BasicNarrowIoManip)(std::ostream&);
+
+ public:
+  // Constructs an empty Message.
+  Message();
+
+  // Copy constructor.
+  Message(const Message& msg) : ss_(new ::std::stringstream) {  // NOLINT
+    *ss_ << msg.GetString();
+  }
+
+  // Constructs a Message from a C-string.
+  explicit Message(const char* str) : ss_(new ::std::stringstream) {
+    *ss_ << str;
+  }
+
+#if GTEST_OS_SYMBIAN
+  // Streams a value (either a pointer or not) to this object.
+  template <typename T>
+  inline Message& operator <<(const T& value) {
+    StreamHelper(typename internal::is_pointer<T>::type(), value);
+    return *this;
+  }
+#else
+  // Streams a non-pointer value to this object.
+  template <typename T>
+  inline Message& operator <<(const T& val) {
+    // Some libraries overload << for STL containers.  These
+    // overloads are defined in the global namespace instead of ::std.
+    //
+    // C++'s symbol lookup rule (i.e. Koenig lookup) says that these
+    // overloads are visible in either the std namespace or the global
+    // namespace, but not other namespaces, including the testing
+    // namespace which Google Test's Message class is in.
+    //
+    // To allow STL containers (and other types that have a << operator
+    // defined in the global namespace) to be used in Google Test
+    // assertions, testing::Message must access the custom << operator
+    // from the global namespace.  With this using declaration,
+    // overloads of << defined in the global namespace and those
+    // visible via Koenig lookup are both exposed in this function.
+    using ::operator <<;
+    *ss_ << val;
+    return *this;
+  }
+
+  // Streams a pointer value to this object.
+  //
+  // This function is an overload of the previous one.  When you
+  // stream a pointer to a Message, this definition will be used as it
+  // is more specialized.  (The C++ Standard, section
+  // [temp.func.order].)  If you stream a non-pointer, then the
+  // previous definition will be used.
+  //
+  // The reason for this overload is that streaming a NULL pointer to
+  // ostream is undefined behavior.  Depending on the compiler, you
+  // may get "0", "(nil)", "(null)", or an access violation.  To
+  // ensure consistent result across compilers, we always treat NULL
+  // as "(null)".
+  template <typename T>
+  inline Message& operator <<(T* const& pointer) {  // NOLINT
+    if (pointer == NULL) {
+      *ss_ << "(null)";
+    } else {
+      *ss_ << pointer;
+    }
+    return *this;
+  }
+#endif  // GTEST_OS_SYMBIAN
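+
+  // Illustration (editorial sketch, not part of the original header): the
+  // pointer overload above makes streaming a NULL pointer well defined and
+  // consistent across compilers.
+  //
+  //   int* p = NULL;
+  //   Message msg;
+  //   msg << p;   // msg.GetString() == "(null)" on every supported compiler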
+
+  // Since the basic IO manipulators are overloaded for both narrow
+  // and wide streams, we have to provide this specialized definition
+  // of operator <<, even though its body is the same as the
+  // templatized version above.  Without this definition, streaming
+  // endl or other basic IO manipulators to Message will confuse the
+  // compiler.
+  Message& operator <<(BasicNarrowIoManip val) {
+    *ss_ << val;
+    return *this;
+  }
+
+  // Instead of 1/0, we want to see true/false for bool values.
+  Message& operator <<(bool b) {
+    return *this << (b ? "true" : "false");
+  }
+
+  // These two overloads allow streaming a wide C string to a Message
+  // using the UTF-8 encoding.
+  Message& operator <<(const wchar_t* wide_c_str);
+  Message& operator <<(wchar_t* wide_c_str);
+
+#if GTEST_HAS_STD_WSTRING
+  // Converts the given wide string to a narrow string using the UTF-8
+  // encoding, and streams the result to this Message object.
+  Message& operator <<(const ::std::wstring& wstr);
+#endif  // GTEST_HAS_STD_WSTRING
+
+#if GTEST_HAS_GLOBAL_WSTRING
+  // Converts the given wide string to a narrow string using the UTF-8
+  // encoding, and streams the result to this Message object.
+  Message& operator <<(const ::wstring& wstr);
+#endif  // GTEST_HAS_GLOBAL_WSTRING
+
+  // Gets the text streamed to this object so far as an std::string.
+  // Each '\0' character in the buffer is replaced with "\\0".
+  //
+  // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+  std::string GetString() const;
+
+ private:
+
+#if GTEST_OS_SYMBIAN
+  // These are needed as the Nokia Symbian Compiler cannot decide between
+  // const T& and const T* in a function template. The Nokia compiler _can_
+  // decide between class template specializations for T and T*, so a
+  // tr1::type_traits-like is_pointer works, and we can overload on that.
+  template <typename T>
+  inline void StreamHelper(internal::true_type /*is_pointer*/, T* pointer) {
+    if (pointer == NULL) {
+      *ss_ << "(null)";
+    } else {
+      *ss_ << pointer;
+    }
+  }
+  template <typename T>
+  inline void StreamHelper(internal::false_type /*is_pointer*/,
+                           const T& value) {
+    // See the comments in Message& operator <<(const T&) above for why
+    // we need this using statement.
+    using ::operator <<;
+    *ss_ << value;
+  }
+#endif  // GTEST_OS_SYMBIAN
+
+  // We'll hold the text streamed to this object here.
+  const internal::scoped_ptr< ::std::stringstream> ss_;
+
+  // We declare (but don't implement) this to prevent the compiler
+  // from implementing the assignment operator.
+  void operator=(const Message&);
+};
+
+// Streams a Message to an ostream.
+inline std::ostream& operator <<(std::ostream& os, const Message& sb) {
+  return os << sb.GetString();
+}
+
+namespace internal {
+
+// Converts a streamable value to an std::string.  A NULL pointer is
+// converted to "(null)".  When the input value is a ::string,
+// ::std::string, ::wstring, or ::std::wstring object, each NUL
+// character in it is replaced with "\\0".
+template <typename T>
+std::string StreamableToString(const T& streamable) {
+  return (Message() << streamable).GetString();
+}
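+
+// Illustration (editorial sketch, not part of the original header), relying
+// only on the behavior documented above:
+//
+//   StreamableToString(42)                        // "42"
+//   StreamableToString(3.5)                       // "3.5"
+//   StreamableToString(static_cast<char*>(NULL))  // "(null)"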
+
+}  // namespace internal
+}  // namespace testing
+
+#endif  // GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: wan@google.com (Zhanyong Wan), eefacm@gmail.com (Sean Mcafee)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file declares the String class and functions used internally by
+// Google Test.  They are subject to change without notice.  They should not
+// be used by code external to Google Test.
+//
+// This header file is #included by <gtest/internal/gtest-internal.h>.
+// It should not be #included by other files.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_
+
+#ifdef __BORLANDC__
+// string.h is not guaranteed to provide strcpy on C++ Builder.
+# include <mem.h>
+#endif
+
+#include <string.h>
+#include <string>
+
+
+namespace testing {
+namespace internal {
+
+// String - an abstract class holding static string utilities.
+class GTEST_API_ String {
+ public:
+  // Static utility methods
+
+  // Clones a 0-terminated C string, allocating memory using new.  The
+  // caller is responsible for deleting the return value using
+  // delete[].  Returns the cloned string, or NULL if the input is
+  // NULL.
+  //
+  // This is different from strdup() in string.h, which allocates
+  // memory using malloc().
+  static const char* CloneCString(const char* c_str);
+
+#if GTEST_OS_WINDOWS_MOBILE
+  // Windows CE does not have the 'ANSI' versions of Win32 APIs. To be
+  // able to pass strings to Win32 APIs on CE we need to convert them
+  // to 'Unicode', UTF-16.
+
+  // Creates a UTF-16 wide string from the given ANSI string, allocating
+  // memory using new. The caller is responsible for deleting the return
+  // value using delete[]. Returns the wide string, or NULL if the
+  // input is NULL.
+  //
+  // The wide string is created using the ANSI codepage (CP_ACP) to
+  // match the behaviour of the ANSI versions of Win32 calls and the
+  // C runtime.
+  static LPCWSTR AnsiToUtf16(const char* c_str);
+
+  // Creates an ANSI string from the given wide string, allocating
+  // memory using new. The caller is responsible for deleting the return
+  // value using delete[]. Returns the ANSI string, or NULL if the
+  // input is NULL.
+  //
+  // The returned string is created using the ANSI codepage (CP_ACP) to
+  // match the behaviour of the ANSI versions of Win32 calls and the
+  // C runtime.
+  static const char* Utf16ToAnsi(LPCWSTR utf16_str);
+#endif
+
+  // Compares two C strings.  Returns true iff they have the same content.
+  //
+  // Unlike strcmp(), this function can handle NULL argument(s).  A
+  // NULL C string is considered different to any non-NULL C string,
+  // including the empty string.
+  static bool CStringEquals(const char* lhs, const char* rhs);
+
+  // Converts a wide C string to a String using the UTF-8 encoding.
+  // NULL will be converted to "(null)".  If an error occurred during
+  // the conversion, "(failed to convert from wide string)" is
+  // returned.
+  static std::string ShowWideCString(const wchar_t* wide_c_str);
+
+  // Compares two wide C strings.  Returns true iff they have the same
+  // content.
+  //
+  // Unlike wcscmp(), this function can handle NULL argument(s).  A
+  // NULL C string is considered different to any non-NULL C string,
+  // including the empty string.
+  static bool WideCStringEquals(const wchar_t* lhs, const wchar_t* rhs);
+
+  // Compares two C strings, ignoring case.  Returns true iff they
+  // have the same content.
+  //
+  // Unlike strcasecmp(), this function can handle NULL argument(s).
+  // A NULL C string is considered different to any non-NULL C string,
+  // including the empty string.
+  static bool CaseInsensitiveCStringEquals(const char* lhs,
+                                           const char* rhs);
+
+  // Compares two wide C strings, ignoring case.  Returns true iff they
+  // have the same content.
+  //
+  // Unlike wcscasecmp(), this function can handle NULL argument(s).
+  // A NULL C string is considered different to any non-NULL wide C string,
+  // including the empty string.
+  // NB: The implementations differ slightly across platforms.
+  // On Windows, this method uses _wcsicmp, which compares according to the
+  // LC_CTYPE environment variable.  On GNU platforms it uses wcscasecmp,
+  // which compares according to the LC_CTYPE category of the current locale.
+  // On Mac OS X, it uses towlower, which also uses the LC_CTYPE category of
+  // the current locale.
+  static bool CaseInsensitiveWideCStringEquals(const wchar_t* lhs,
+                                               const wchar_t* rhs);
+
+  // Returns true iff the given string ends with the given suffix, ignoring
+  // case. Any string is considered to end with an empty suffix.
+  static bool EndsWithCaseInsensitive(
+      const std::string& str, const std::string& suffix);
+
+  // Formats an int value as "%02d".
+  static std::string FormatIntWidth2(int value);  // "%02d" for width == 2
+
+  // Formats an int value as "%X".
+  static std::string FormatHexInt(int value);
+
+  // Formats a byte as "%02X".
+  static std::string FormatByte(unsigned char value);
+
+ private:
+  String();  // Not meant to be instantiated.
+};  // class String
+
+// Gets the content of the stringstream's buffer as an std::string.  Each '\0'
+// character in the buffer is replaced with "\\0".
+GTEST_API_ std::string StringStreamToString(::std::stringstream* stream);
+
+}  // namespace internal
+}  // namespace testing
+
+#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keith.ray@gmail.com (Keith Ray)
+//
+// Google Test filepath utilities
+//
+// This header file declares classes and functions used internally by
+// Google Test.  They are subject to change without notice.
+//
+// This file is #included in <gtest/internal/gtest-internal.h>.
+// Do not include this header file separately!
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_
+
+
+namespace testing {
+namespace internal {
+
+// FilePath - a class for file and directory pathname manipulation which
+// handles platform-specific conventions (like the pathname separator).
+// Used for helper functions for naming files in a directory for xml output.
+// Except for Set methods, all methods are const or static, which provides an
+// "immutable value object" -- useful for peace of mind.
+// A FilePath with a value ending in a path separator ("like/this/") represents
+// a directory, otherwise it is assumed to represent a file. In either case,
+// it may or may not represent an actual file or directory in the file system.
+// Names are NOT checked for syntax correctness -- no checking for illegal
+// characters, malformed paths, etc.
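+//
+// Illustration (editorial sketch, not part of the original header), based on
+// the documented behavior of the methods declared below:
+//
+//   FilePath dir("output//logs");   // Normalize() collapses "//" into "/"
+//   FilePath xml = FilePath::MakeFileName(dir, FilePath("run"), 0, "xml");
+//   // xml.string() == "output/logs/run.xml"  ('\' separators on Windows)
+//   FilePath("output/logs/").IsDirectory();   // true: trailing separator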
+
+class GTEST_API_ FilePath {
+ public:
+  FilePath() : pathname_("") { }
+  FilePath(const FilePath& rhs) : pathname_(rhs.pathname_) { }
+
+  explicit FilePath(const std::string& pathname) : pathname_(pathname) {
+    Normalize();
+  }
+
+  FilePath& operator=(const FilePath& rhs) {
+    Set(rhs);
+    return *this;
+  }
+
+  void Set(const FilePath& rhs) {
+    pathname_ = rhs.pathname_;
+  }
+
+  const std::string& string() const { return pathname_; }
+  const char* c_str() const { return pathname_.c_str(); }
+
+  // Returns the current working directory, or "" if unsuccessful.
+  static FilePath GetCurrentDir();
+
+  // Given directory = "dir", base_name = "test", number = 0,
+  // extension = "xml", returns "dir/test.xml". If number is greater
+  // than zero (e.g., 12), returns "dir/test_12.xml".
+  // On Windows platform, uses \ as the separator rather than /.
+  static FilePath MakeFileName(const FilePath& directory,
+                               const FilePath& base_name,
+                               int number,
+                               const char* extension);
+
+  // Given directory = "dir", relative_path = "test.xml",
+  // returns "dir/test.xml".
+  // On Windows, uses \ as the separator rather than /.
+  static FilePath ConcatPaths(const FilePath& directory,
+                              const FilePath& relative_path);
+
+  // Returns a pathname for a file that does not currently exist. The pathname
+  // will be directory/base_name.extension or
+  // directory/base_name_<number>.extension if directory/base_name.extension
+  // already exists. The number will be incremented until a pathname is found
+  // that does not already exist.
+  // Examples: 'dir/foo_test.xml' or 'dir/foo_test_1.xml'.
+  // There could be a race condition if two or more processes are calling this
+  // function at the same time -- they could both pick the same filename.
+  static FilePath GenerateUniqueFileName(const FilePath& directory,
+                                         const FilePath& base_name,
+                                         const char* extension);
+
+  // Returns true iff the path is "".
+  bool IsEmpty() const { return pathname_.empty(); }
+
+  // If the input name has a trailing separator character, removes it and
+  // returns the name; otherwise returns the name string unmodified.
+  // On Windows platform, uses \ as the separator, other platforms use /.
+  FilePath RemoveTrailingPathSeparator() const;
+
+  // Returns a copy of the FilePath with the directory part removed.
+  // Example: FilePath("path/to/file").RemoveDirectoryName() returns
+  // FilePath("file"). If there is no directory part ("just_a_file"), it returns
+  // the FilePath unmodified. If there is no file part ("just_a_dir/") it
+  // returns an empty FilePath ("").
+  // On Windows platform, '\' is the path separator, otherwise it is '/'.
+  FilePath RemoveDirectoryName() const;
+
+  // RemoveFileName returns the directory path with the filename removed.
+  // Example: FilePath("path/to/file").RemoveFileName() returns "path/to/".
+  // If the FilePath is "a_file" or "/a_file", RemoveFileName returns
+  // FilePath("./") or, on Windows, FilePath(".\\"). If the filepath does
+  // not have a file, like "just/a/dir/", it returns the FilePath unmodified.
+  // On Windows platform, '\' is the path separator, otherwise it is '/'.
+  FilePath RemoveFileName() const;
+
+  // Returns a copy of the FilePath with the case-insensitive extension removed.
+  // Example: FilePath("dir/file.exe").RemoveExtension("EXE") returns
+  // FilePath("dir/file"). If a case-insensitive extension is not
+  // found, returns a copy of the original FilePath.
+  FilePath RemoveExtension(const char* extension) const;
+
+  // Creates directories so that path exists. Returns true if successful or if
+  // the directories already exist; returns false if unable to create
+  // directories for any reason. Will also return false if the FilePath does
+  // not represent a directory (that is, it doesn't end with a path separator).
+  bool CreateDirectoriesRecursively() const;
+
+  // Create the directory so that path exists. Returns true if successful or
+  // if the directory already exists; returns false if unable to create the
+  // directory for any reason, including if the parent directory does not
+  // exist. Not named "CreateDirectory" because that's a macro on Windows.
+  bool CreateFolder() const;
+
+  // Returns true if FilePath describes something in the file-system,
+  // either a file, directory, or whatever, and that something exists.
+  bool FileOrDirectoryExists() const;
+
+  // Returns true if pathname describes a directory in the file-system
+  // that exists.
+  bool DirectoryExists() const;
+
+  // Returns true if FilePath ends with a path separator, which indicates that
+  // it is intended to represent a directory. Returns false otherwise.
+  // This does NOT check that a directory (or file) actually exists.
+  bool IsDirectory() const;
+
+  // Returns true if pathname describes a root directory. (Windows has one
+  // root directory per disk drive.)
+  bool IsRootDirectory() const;
+
+  // Returns true if pathname describes an absolute path.
+  bool IsAbsolutePath() const;
+
+ private:
+  // Replaces multiple consecutive separators with a single separator.
+  // For example, "bar///foo" becomes "bar/foo". Does not eliminate other
+  // redundancies that might be in a pathname involving "." or "..".
+  //
+  // A pathname with multiple consecutive separators may occur either through
+  // user error or as a result of some scripts or APIs that generate a pathname
+  // with a trailing separator. On other platforms the same API or script
+  // may NOT generate a pathname with a trailing "/". Then elsewhere that
+  // pathname may have another "/" and pathname components added to it,
+  // without checking for the separator already being there.
+  // The script language and operating system may allow paths like "foo//bar"
+  // but some of the functions in FilePath will not handle that correctly. In
+  // particular, RemoveTrailingPathSeparator() only removes one separator, and
+  // it is called in CreateDirectoriesRecursively() assuming that it will change
+  // a pathname from directory syntax (trailing separator) to filename syntax.
+  //
+  // On Windows this method also replaces the alternate path separator '/' with
+  // the primary path separator '\\', so that for example "bar\\/\\foo" becomes
+  // "bar\\foo".
+
+  void Normalize();
+
+  // Returns a pointer to the last occurrence of a valid path separator in
+  // the FilePath. On Windows, for example, both '/' and '\' are valid path
+  // separators. Returns NULL if no path separator was found.
+  const char* FindLastPathSeparator() const;
+
+  std::string pathname_;
+};  // class FilePath
+
+}  // namespace internal
+}  // namespace testing
+
+#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_
+// This file was GENERATED by command:
+//     pump.py gtest-type-util.h.pump
+// DO NOT EDIT BY HAND!!!
+
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Type utilities needed for implementing typed and type-parameterized
+// tests.  This file is generated by a SCRIPT.  DO NOT EDIT BY HAND!
+//
+// Currently we support at most 50 types in a list, and at most 50
+// type-parameterized tests in one type-parameterized test case.
+// Please contact googletestframework@googlegroups.com if you need
+// more.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_
+
+
+// #ifdef __GNUC__ is too general here.  It is possible to use gcc without using
+// libstdc++ (which is where cxxabi.h comes from).
+# if GTEST_HAS_CXXABI_H_
+#  include <cxxabi.h>
+# elif defined(__HP_aCC)
+#  include <acxx_demangle.h>
+# endif  // GTEST_HAS_CXXABI_H_
+
+namespace testing {
+namespace internal {
+
+// GetTypeName<T>() returns a human-readable name of type T.
+// NB: This function is also used in Google Mock, so don't move it inside of
+// the typed-test-only section below.
+template <typename T>
+std::string GetTypeName() {
+# if GTEST_HAS_RTTI
+
+  const char* const name = typeid(T).name();
+#  if GTEST_HAS_CXXABI_H_ || defined(__HP_aCC)
+  int status = 0;
+  // gcc's implementation of typeid(T).name() mangles the type name,
+  // so we have to demangle it.
+#   if GTEST_HAS_CXXABI_H_
+  using abi::__cxa_demangle;
+#   endif  // GTEST_HAS_CXXABI_H_
+  char* const readable_name = __cxa_demangle(name, 0, 0, &status);
+  const std::string name_str(status == 0 ? readable_name : name);
+  free(readable_name);
+  return name_str;
+#  else
+  return name;
+#  endif  // GTEST_HAS_CXXABI_H_ || __HP_aCC
+
+# else
+
+  return "<type>";
+
+# endif  // GTEST_HAS_RTTI
+}
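+
+// Illustration (editorial sketch, not part of the original header): the
+// result of GetTypeName<int>() depends on the build configuration handled
+// above.
+//
+//   GetTypeName<int>()
+//     // RTTI + a demangler (cxxabi.h or aCC): "int"
+//     // RTTI without a demangler: the raw typeid(int).name() string,
+//     //                           e.g. "int" on MSVC, "i" on gcc
+//     // no RTTI:                  "<type>"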
+
+#if GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P
+
+// AssertTypeEq<T1, T2>::type is defined iff T1 and T2 are the same
+// type.  This can be used as a compile-time assertion to ensure that
+// two types are equal.
+
+template <typename T1, typename T2>
+struct AssertTypeEq;
+
+template <typename T>
+struct AssertTypeEq<T, T> {
+  typedef bool type;
+};
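+
+// Illustration (editorial sketch, not part of the original header):
+//
+//   typedef AssertTypeEq<int, int>::type Ok;   // compiles: the types match
+//   // typedef AssertTypeEq<int, long>::type Bad;
+//   //   would not compile: only the <T, T> specialization defines 'type'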
+
+// A unique type used as the default value for the arguments of class
+// template Types.  This allows us to simulate variadic templates
+// (e.g. Types<int>, Types<int, double>, etc.), which C++ doesn't
+// support directly.
+struct None {};
+
+// The following family of struct and struct templates are used to
+// represent type lists.  In particular, TypesN<T1, T2, ..., TN>
+// represents a type list with N types (T1, T2, ..., and TN) in it.
+// Except for Types0, every struct in the family has two member types:
+// Head for the first type in the list, and Tail for the rest of the
+// list.
+
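+// Illustration (editorial sketch, not part of the original header): the
+// list Types3<int, char, double> (defined below) unrolls via Head/Tail as
+//
+//   Types3<int, char, double>::Head              // int
+//   Types3<int, char, double>::Tail              // Types2<char, double>
+//   Types3<int, char, double>::Tail::Tail        // Types1<double>
+//   Types3<int, char, double>::Tail::Tail::Tail  // Types0 (end of the list)
+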
+// The empty type list.
+struct Types0 {};
+
+// Type lists of length 1, 2, 3, and so on.
+
+template <typename T1>
+struct Types1 {
+  typedef T1 Head;
+  typedef Types0 Tail;
+};
+template <typename T1, typename T2>
+struct Types2 {
+  typedef T1 Head;
+  typedef Types1<T2> Tail;
+};
+
+template <typename T1, typename T2, typename T3>
+struct Types3 {
+  typedef T1 Head;
+  typedef Types2<T2, T3> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4>
+struct Types4 {
+  typedef T1 Head;
+  typedef Types3<T2, T3, T4> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+struct Types5 {
+  typedef T1 Head;
+  typedef Types4<T2, T3, T4, T5> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6>
+struct Types6 {
+  typedef T1 Head;
+  typedef Types5<T2, T3, T4, T5, T6> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7>
+struct Types7 {
+  typedef T1 Head;
+  typedef Types6<T2, T3, T4, T5, T6, T7> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8>
+struct Types8 {
+  typedef T1 Head;
+  typedef Types7<T2, T3, T4, T5, T6, T7, T8> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9>
+struct Types9 {
+  typedef T1 Head;
+  typedef Types8<T2, T3, T4, T5, T6, T7, T8, T9> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10>
+struct Types10 {
+  typedef T1 Head;
+  typedef Types9<T2, T3, T4, T5, T6, T7, T8, T9, T10> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11>
+struct Types11 {
+  typedef T1 Head;
+  typedef Types10<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12>
+struct Types12 {
+  typedef T1 Head;
+  typedef Types11<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13>
+struct Types13 {
+  typedef T1 Head;
+  typedef Types12<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14>
+struct Types14 {
+  typedef T1 Head;
+  typedef Types13<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15>
+struct Types15 {
+  typedef T1 Head;
+  typedef Types14<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+      T15> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16>
+struct Types16 {
+  typedef T1 Head;
+  typedef Types15<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+      T16> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17>
+struct Types17 {
+  typedef T1 Head;
+  typedef Types16<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+      T16, T17> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18>
+struct Types18 {
+  typedef T1 Head;
+  typedef Types17<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+      T16, T17, T18> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19>
+struct Types19 {
+  typedef T1 Head;
+  typedef Types18<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+      T16, T17, T18, T19> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20>
+struct Types20 {
+  typedef T1 Head;
+  typedef Types19<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+      T16, T17, T18, T19, T20> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21>
+struct Types21 {
+  typedef T1 Head;
+  typedef Types20<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+      T16, T17, T18, T19, T20, T21> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22>
+struct Types22 {
+  typedef T1 Head;
+  typedef Types21<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+      T16, T17, T18, T19, T20, T21, T22> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23>
+struct Types23 {
+  typedef T1 Head;
+  typedef Types22<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+      T16, T17, T18, T19, T20, T21, T22, T23> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24>
+struct Types24 {
+  typedef T1 Head;
+  typedef Types23<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+      T16, T17, T18, T19, T20, T21, T22, T23, T24> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25>
+struct Types25 {
+  typedef T1 Head;
+  typedef Types24<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26>
+struct Types26 {
+  typedef T1 Head;
+  typedef Types25<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27>
+struct Types27 {
+  typedef T1 Head;
+  typedef Types26<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28>
+struct Types28 {
+  typedef T1 Head;
+  typedef Types27<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29>
+struct Types29 {
+  typedef T1 Head;
+  typedef Types28<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+      T29> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30>
+struct Types30 {
+  typedef T1 Head;
+  typedef Types29<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+      T30> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31>
+struct Types31 {
+  typedef T1 Head;
+  typedef Types30<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+      T30, T31> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32>
+struct Types32 {
+  typedef T1 Head;
+  typedef Types31<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+      T30, T31, T32> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33>
+struct Types33 {
+  typedef T1 Head;
+  typedef Types32<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+      T30, T31, T32, T33> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34>
+struct Types34 {
+  typedef T1 Head;
+  typedef Types33<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+      T30, T31, T32, T33, T34> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35>
+struct Types35 {
+  typedef T1 Head;
+  typedef Types34<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+      T30, T31, T32, T33, T34, T35> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36>
+struct Types36 {
+  typedef T1 Head;
+  typedef Types35<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+      T30, T31, T32, T33, T34, T35, T36> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37>
+struct Types37 {
+  typedef T1 Head;
+  typedef Types36<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+      T30, T31, T32, T33, T34, T35, T36, T37> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38>
+struct Types38 {
+  typedef T1 Head;
+  typedef Types37<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+      T30, T31, T32, T33, T34, T35, T36, T37, T38> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39>
+struct Types39 {
+  typedef T1 Head;
+  typedef Types38<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+      T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40>
+struct Types40 {
+  typedef T1 Head;
+  typedef Types39<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+      T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41>
+struct Types41 {
+  typedef T1 Head;
+  typedef Types40<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+      T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42>
+struct Types42 {
+  typedef T1 Head;
+  typedef Types41<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+      T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42, typename T43>
+struct Types43 {
+  typedef T1 Head;
+  typedef Types42<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+      T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+      T43> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42, typename T43, typename T44>
+struct Types44 {
+  typedef T1 Head;
+  typedef Types43<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+      T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+      T44> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42, typename T43, typename T44, typename T45>
+struct Types45 {
+  typedef T1 Head;
+  typedef Types44<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+      T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+      T44, T45> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42, typename T43, typename T44, typename T45,
+    typename T46>
+struct Types46 {
+  typedef T1 Head;
+  typedef Types45<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+      T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+      T44, T45, T46> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42, typename T43, typename T44, typename T45,
+    typename T46, typename T47>
+struct Types47 {
+  typedef T1 Head;
+  typedef Types46<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+      T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+      T44, T45, T46, T47> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42, typename T43, typename T44, typename T45,
+    typename T46, typename T47, typename T48>
+struct Types48 {
+  typedef T1 Head;
+  typedef Types47<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+      T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+      T44, T45, T46, T47, T48> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42, typename T43, typename T44, typename T45,
+    typename T46, typename T47, typename T48, typename T49>
+struct Types49 {
+  typedef T1 Head;
+  typedef Types48<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+      T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+      T44, T45, T46, T47, T48, T49> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42, typename T43, typename T44, typename T45,
+    typename T46, typename T47, typename T48, typename T49, typename T50>
+struct Types50 {
+  typedef T1 Head;
+  typedef Types49<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+      T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+      T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+      T44, T45, T46, T47, T48, T49, T50> Tail;
+};
+
+
+}  // namespace internal
+
+// We don't want to require the users to write TypesN<...> directly,
+// as that would require them to count the length.  Types<...> is much
+// easier to write, but generates horrible messages when there is a
+// compiler error, as gcc insists on printing out each template
+// argument, even if it has the default value (this means Types<int>
+// will appear as Types<int, None, None, ..., None> in the compiler
+// errors).
+//
+// Our solution combines the best parts of both approaches: a user
+// writes Types<T1, ..., TN>, and Google Test translates that to
+// TypesN<T1, ..., TN> internally to make error messages readable.
+// The translation is done by the 'type' member of the Types
+// template.
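+//
+// For example (a minimal illustrative sketch; FooTest and MyTypes are
+// hypothetical names, and TYPED_TEST_CASE is the typed-test macro that
+// Google Test provides elsewhere):
+//
+//   typedef testing::Types<char, int, unsigned int> MyTypes;
+//   // MyTypes::type is internal::Types3<char, int, unsigned int>,
+//   // whose Head is char and whose Tail is
+//   // internal::Types2<int, unsigned int>.
+//   TYPED_TEST_CASE(FooTest, MyTypes);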
+template <typename T1 = internal::None, typename T2 = internal::None,
+    typename T3 = internal::None, typename T4 = internal::None,
+    typename T5 = internal::None, typename T6 = internal::None,
+    typename T7 = internal::None, typename T8 = internal::None,
+    typename T9 = internal::None, typename T10 = internal::None,
+    typename T11 = internal::None, typename T12 = internal::None,
+    typename T13 = internal::None, typename T14 = internal::None,
+    typename T15 = internal::None, typename T16 = internal::None,
+    typename T17 = internal::None, typename T18 = internal::None,
+    typename T19 = internal::None, typename T20 = internal::None,
+    typename T21 = internal::None, typename T22 = internal::None,
+    typename T23 = internal::None, typename T24 = internal::None,
+    typename T25 = internal::None, typename T26 = internal::None,
+    typename T27 = internal::None, typename T28 = internal::None,
+    typename T29 = internal::None, typename T30 = internal::None,
+    typename T31 = internal::None, typename T32 = internal::None,
+    typename T33 = internal::None, typename T34 = internal::None,
+    typename T35 = internal::None, typename T36 = internal::None,
+    typename T37 = internal::None, typename T38 = internal::None,
+    typename T39 = internal::None, typename T40 = internal::None,
+    typename T41 = internal::None, typename T42 = internal::None,
+    typename T43 = internal::None, typename T44 = internal::None,
+    typename T45 = internal::None, typename T46 = internal::None,
+    typename T47 = internal::None, typename T48 = internal::None,
+    typename T49 = internal::None, typename T50 = internal::None>
+struct Types {
+  typedef internal::Types50<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+      T41, T42, T43, T44, T45, T46, T47, T48, T49, T50> type;
+};
+
+template <>
+struct Types<internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None> {
+  typedef internal::Types0 type;
+};
+template <typename T1>
+struct Types<T1, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None> {
+  typedef internal::Types1<T1> type;
+};
+template <typename T1, typename T2>
+struct Types<T1, T2, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None> {
+  typedef internal::Types2<T1, T2> type;
+};
+template <typename T1, typename T2, typename T3>
+struct Types<T1, T2, T3, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None> {
+  typedef internal::Types3<T1, T2, T3> type;
+};
+template <typename T1, typename T2, typename T3, typename T4>
+struct Types<T1, T2, T3, T4, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None> {
+  typedef internal::Types4<T1, T2, T3, T4> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+struct Types<T1, T2, T3, T4, T5, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None> {
+  typedef internal::Types5<T1, T2, T3, T4, T5> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6>
+struct Types<T1, T2, T3, T4, T5, T6, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None> {
+  typedef internal::Types6<T1, T2, T3, T4, T5, T6> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7>
+struct Types<T1, T2, T3, T4, T5, T6, T7, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None> {
+  typedef internal::Types7<T1, T2, T3, T4, T5, T6, T7> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None> {
+  typedef internal::Types8<T1, T2, T3, T4, T5, T6, T7, T8> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None> {
+  typedef internal::Types9<T1, T2, T3, T4, T5, T6, T7, T8, T9> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None> {
+  typedef internal::Types10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None> {
+  typedef internal::Types11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None> {
+  typedef internal::Types12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None> {
+  typedef internal::Types13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None> {
+  typedef internal::Types14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13, T14> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None> {
+  typedef internal::Types15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13, T14, T15> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+    T16, internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None> {
+  typedef internal::Types16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13, T14, T15, T16> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+    T16, T17, internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None> {
+  typedef internal::Types17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13, T14, T15, T16, T17> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+    T16, T17, T18, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None> {
+  typedef internal::Types18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13, T14, T15, T16, T17, T18> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+    T16, T17, T18, T19, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None> {
+  typedef internal::Types19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13, T14, T15, T16, T17, T18, T19> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+    T16, T17, T18, T19, T20, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None> {
+  typedef internal::Types20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13, T14, T15, T16, T17, T18, T19, T20> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+    T16, T17, T18, T19, T20, T21, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None> {
+  typedef internal::Types21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13, T14, T15, T16, T17, T18, T19, T20, T21> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+    T16, T17, T18, T19, T20, T21, T22, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None> {
+  typedef internal::Types22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+    T16, T17, T18, T19, T20, T21, T22, T23, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None> {
+  typedef internal::Types23<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+    T16, T17, T18, T19, T20, T21, T22, T23, T24, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None> {
+  typedef internal::Types24<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None> {
+  typedef internal::Types25<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None> {
+  typedef internal::Types26<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+      T26> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None> {
+  typedef internal::Types27<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+      T27> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None> {
+  typedef internal::Types28<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+      T27, T28> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None> {
+  typedef internal::Types29<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+      T27, T28, T29> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None> {
+  typedef internal::Types30<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+      T27, T28, T29, T30> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+    T31, internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None> {
+  typedef internal::Types31<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+      T27, T28, T29, T30, T31> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+    T31, T32, internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None> {
+  typedef internal::Types32<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+      T27, T28, T29, T30, T31, T32> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+    T31, T32, T33, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None> {
+  typedef internal::Types33<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+      T27, T28, T29, T30, T31, T32, T33> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+    T31, T32, T33, T34, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None> {
+  typedef internal::Types34<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+      T27, T28, T29, T30, T31, T32, T33, T34> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+    T31, T32, T33, T34, T35, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None> {
+  typedef internal::Types35<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+      T27, T28, T29, T30, T31, T32, T33, T34, T35> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+    T31, T32, T33, T34, T35, T36, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None> {
+  typedef internal::Types36<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+    T31, T32, T33, T34, T35, T36, T37, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None> {
+  typedef internal::Types37<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+    T31, T32, T33, T34, T35, T36, T37, T38, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None> {
+  typedef internal::Types38<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+    T31, T32, T33, T34, T35, T36, T37, T38, T39, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None> {
+  typedef internal::Types39<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+    T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None> {
+  typedef internal::Types40<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+      T40> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+    T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None, internal::None> {
+  typedef internal::Types41<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+      T41> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+    T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, internal::None,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None> {
+  typedef internal::Types42<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+      T41, T42> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42, typename T43>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+    T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None, internal::None> {
+  typedef internal::Types43<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+      T41, T42, T43> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42, typename T43, typename T44>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+    T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None, internal::None> {
+  typedef internal::Types44<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+      T41, T42, T43, T44> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42, typename T43, typename T44, typename T45>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+    T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,
+    internal::None, internal::None, internal::None, internal::None,
+    internal::None> {
+  typedef internal::Types45<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+      T41, T42, T43, T44, T45> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42, typename T43, typename T44, typename T45,
+    typename T46>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+    T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,
+    T46, internal::None, internal::None, internal::None, internal::None> {
+  typedef internal::Types46<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+      T41, T42, T43, T44, T45, T46> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42, typename T43, typename T44, typename T45,
+    typename T46, typename T47>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+    T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,
+    T46, T47, internal::None, internal::None, internal::None> {
+  typedef internal::Types47<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+      T41, T42, T43, T44, T45, T46, T47> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42, typename T43, typename T44, typename T45,
+    typename T46, typename T47, typename T48>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+    T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,
+    T46, T47, T48, internal::None, internal::None> {
+  typedef internal::Types48<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+      T41, T42, T43, T44, T45, T46, T47, T48> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42, typename T43, typename T44, typename T45,
+    typename T46, typename T47, typename T48, typename T49>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+    T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+    T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,
+    T46, T47, T48, T49, internal::None> {
+  typedef internal::Types49<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+      T41, T42, T43, T44, T45, T46, T47, T48, T49> type;
+};
+
+namespace internal {
+
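+// GTEST_TEMPLATE_ expands to the parameter syntax for a template template
+// parameter, i.e. a parameter that accepts a class template taking a single
+// type parameter.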
+# define GTEST_TEMPLATE_ template <typename T> class
+
+// The template "selector" struct TemplateSel<Tmpl> is used to
+// represent Tmpl, which must be a class template with one type
+// parameter, as a type.  TemplateSel<Tmpl>::Bind<T>::type is defined
+// as the type Tmpl<T>.  This allows us to actually instantiate the
+// template "selected" by TemplateSel<Tmpl>.
+//
+// This trick is necessary for simulating a typedef for class templates,
+// which C++ (before C++11's alias templates) doesn't support directly.
+template <GTEST_TEMPLATE_ Tmpl>
+struct TemplateSel {
+  template <typename T>
+  struct Bind {
+    typedef Tmpl<T> type;
+  };
+};
+
+# define GTEST_BIND_(TmplSel, T) \
+  TmplSel::template Bind<T>::type
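+
+// The next few lines are an illustrative sketch only, not part of the
+// generated machinery: they show how TemplateSel and GTEST_BIND_ fit
+// together.  SampleContainer_ and SampleBoundInt_ are hypothetical names
+// introduced purely for this example.
+template <typename T> class SampleContainer_ {};
+// TemplateSel<SampleContainer_> is an ordinary type standing in for the
+// class template SampleContainer_; GTEST_BIND_ recovers the instantiation
+// SampleContainer_<int> from it.
+typedef GTEST_BIND_(TemplateSel<SampleContainer_>, int) SampleBoundInt_;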
+
+// A unique struct template used as the default value for the
+// arguments of class template Templates.  This allows us to simulate
+// variadic templates (e.g. Templates<Tmpl1>, Templates<Tmpl1, Tmpl2>, and so
+// on, where each argument is a class template), which C++ doesn't support
+// directly.
+template <typename T>
+struct NoneT {};
+
+// The following family of struct and struct templates is used to
+// represent template lists.  In particular, TemplatesN<T1, T2, ...,
+// TN> represents a list of N templates (T1, T2, ..., and TN).  Except
+// for Templates0, every struct in the family has two member types:
+// Head for the selector of the first template in the list, and Tail
+// for the rest of the list.
+
+// The empty template list.
+struct Templates0 {};
+
+// Template lists of length 1, 2, 3, and so on.
+
+template <GTEST_TEMPLATE_ T1>
+struct Templates1 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates0 Tail;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2>
+struct Templates2 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates1<T2> Tail;
+};
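+
+// Illustrative sketch only (hypothetical names SampleA_ and SampleB_): a
+// template list is consumed by peeling off Head, a TemplateSel for the first
+// template, and recursing on Tail, the list of the remaining templates.
+template <typename T> class SampleA_ {};
+template <typename T> class SampleB_ {};
+typedef Templates2<SampleA_, SampleB_> SampleList_;
+// SampleList_::Head is TemplateSel<SampleA_>, so binding it to int
+// instantiates SampleA_<int>; SampleList_::Tail is Templates1<SampleB_>,
+// whose own Tail is the empty list Templates0.
+typedef GTEST_BIND_(SampleList_::Head, int) SampleHeadBoundToInt_;
+typedef SampleList_::Tail SampleTail_;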
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3>
+struct Templates3 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates2<T2, T3> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4>
+struct Templates4 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates3<T2, T3, T4> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5>
+struct Templates5 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates4<T2, T3, T4, T5> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6>
+struct Templates6 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates5<T2, T3, T4, T5, T6> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7>
+struct Templates7 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates6<T2, T3, T4, T5, T6, T7> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8>
+struct Templates8 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates7<T2, T3, T4, T5, T6, T7, T8> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9>
+struct Templates9 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates8<T2, T3, T4, T5, T6, T7, T8, T9> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10>
+struct Templates10 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates9<T2, T3, T4, T5, T6, T7, T8, T9, T10> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11>
+struct Templates11 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates10<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12>
+struct Templates12 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates11<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13>
+struct Templates13 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates12<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14>
+struct Templates14 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates13<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+      T14> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15>
+struct Templates15 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates14<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+      T15> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16>
+struct Templates16 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates15<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+      T15, T16> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17>
+struct Templates17 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates16<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+      T15, T16, T17> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18>
+struct Templates18 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates17<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+      T15, T16, T17, T18> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19>
+struct Templates19 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates18<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+      T15, T16, T17, T18, T19> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20>
+struct Templates20 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates19<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+      T15, T16, T17, T18, T19, T20> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21>
+struct Templates21 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates20<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+      T15, T16, T17, T18, T19, T20, T21> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22>
+struct Templates22 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates21<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+      T15, T16, T17, T18, T19, T20, T21, T22> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23>
+struct Templates23 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates22<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+      T15, T16, T17, T18, T19, T20, T21, T22, T23> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24>
+struct Templates24 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates23<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25>
+struct Templates25 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates24<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26>
+struct Templates26 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates25<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27>
+struct Templates27 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates26<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28>
+struct Templates28 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates27<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+      T28> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29>
+struct Templates29 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates28<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+      T29> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30>
+struct Templates30 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates29<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+      T29, T30> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31>
+struct Templates31 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates30<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+      T29, T30, T31> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32>
+struct Templates32 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates31<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+      T29, T30, T31, T32> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33>
+struct Templates33 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates32<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+      T29, T30, T31, T32, T33> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+    GTEST_TEMPLATE_ T34>
+struct Templates34 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates33<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+      T29, T30, T31, T32, T33, T34> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35>
+struct Templates35 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates34<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+      T29, T30, T31, T32, T33, T34, T35> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36>
+struct Templates36 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates35<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+      T29, T30, T31, T32, T33, T34, T35, T36> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+    GTEST_TEMPLATE_ T37>
+struct Templates37 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates36<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+      T29, T30, T31, T32, T33, T34, T35, T36, T37> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38>
+struct Templates38 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates37<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39>
+struct Templates39 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates38<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+    GTEST_TEMPLATE_ T40>
+struct Templates40 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates39<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41>
+struct Templates41 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates40<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42>
+struct Templates42 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates41<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+      T42> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+    GTEST_TEMPLATE_ T43>
+struct Templates43 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates42<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+      T43> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44>
+struct Templates44 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates43<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+      T43, T44> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45>
+struct Templates45 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates44<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+      T43, T44, T45> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+    GTEST_TEMPLATE_ T46>
+struct Templates46 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates45<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+      T43, T44, T45, T46> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+    GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47>
+struct Templates47 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates46<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+      T43, T44, T45, T46, T47> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+    GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48>
+struct Templates48 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates47<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+      T43, T44, T45, T46, T47, T48> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+    GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48,
+    GTEST_TEMPLATE_ T49>
+struct Templates49 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates48<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+      T43, T44, T45, T46, T47, T48, T49> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+    GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48,
+    GTEST_TEMPLATE_ T49, GTEST_TEMPLATE_ T50>
+struct Templates50 {
+  typedef TemplateSel<T1> Head;
+  typedef Templates49<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+      T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+      T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+      T43, T44, T45, T46, T47, T48, T49, T50> Tail;
+};
+
+
+// We don't want to require users to write TemplatesN<...> directly, as that
+// would require them to count the length of the template list.
+// Templates<...> is much easier to write, but it generates horrible messages
+// when there is a compiler error, as gcc insists on printing out each
+// template argument, even when it has its default value (this means
+// Templates<list> will appear as Templates<list, NoneT, NoneT, ..., NoneT>
+// in the compiler errors).
+//
+// Our solution is to combine the best part of the two approaches: a
+// user would write Templates<T1, ..., TN>, and Google Test will translate
+// that to TemplatesN<T1, ..., TN> internally to make error messages
+// readable.  The translation is done by the 'type' member of the
+// Templates template.
+template <GTEST_TEMPLATE_ T1 = NoneT, GTEST_TEMPLATE_ T2 = NoneT,
+    GTEST_TEMPLATE_ T3 = NoneT, GTEST_TEMPLATE_ T4 = NoneT,
+    GTEST_TEMPLATE_ T5 = NoneT, GTEST_TEMPLATE_ T6 = NoneT,
+    GTEST_TEMPLATE_ T7 = NoneT, GTEST_TEMPLATE_ T8 = NoneT,
+    GTEST_TEMPLATE_ T9 = NoneT, GTEST_TEMPLATE_ T10 = NoneT,
+    GTEST_TEMPLATE_ T11 = NoneT, GTEST_TEMPLATE_ T12 = NoneT,
+    GTEST_TEMPLATE_ T13 = NoneT, GTEST_TEMPLATE_ T14 = NoneT,
+    GTEST_TEMPLATE_ T15 = NoneT, GTEST_TEMPLATE_ T16 = NoneT,
+    GTEST_TEMPLATE_ T17 = NoneT, GTEST_TEMPLATE_ T18 = NoneT,
+    GTEST_TEMPLATE_ T19 = NoneT, GTEST_TEMPLATE_ T20 = NoneT,
+    GTEST_TEMPLATE_ T21 = NoneT, GTEST_TEMPLATE_ T22 = NoneT,
+    GTEST_TEMPLATE_ T23 = NoneT, GTEST_TEMPLATE_ T24 = NoneT,
+    GTEST_TEMPLATE_ T25 = NoneT, GTEST_TEMPLATE_ T26 = NoneT,
+    GTEST_TEMPLATE_ T27 = NoneT, GTEST_TEMPLATE_ T28 = NoneT,
+    GTEST_TEMPLATE_ T29 = NoneT, GTEST_TEMPLATE_ T30 = NoneT,
+    GTEST_TEMPLATE_ T31 = NoneT, GTEST_TEMPLATE_ T32 = NoneT,
+    GTEST_TEMPLATE_ T33 = NoneT, GTEST_TEMPLATE_ T34 = NoneT,
+    GTEST_TEMPLATE_ T35 = NoneT, GTEST_TEMPLATE_ T36 = NoneT,
+    GTEST_TEMPLATE_ T37 = NoneT, GTEST_TEMPLATE_ T38 = NoneT,
+    GTEST_TEMPLATE_ T39 = NoneT, GTEST_TEMPLATE_ T40 = NoneT,
+    GTEST_TEMPLATE_ T41 = NoneT, GTEST_TEMPLATE_ T42 = NoneT,
+    GTEST_TEMPLATE_ T43 = NoneT, GTEST_TEMPLATE_ T44 = NoneT,
+    GTEST_TEMPLATE_ T45 = NoneT, GTEST_TEMPLATE_ T46 = NoneT,
+    GTEST_TEMPLATE_ T47 = NoneT, GTEST_TEMPLATE_ T48 = NoneT,
+    GTEST_TEMPLATE_ T49 = NoneT, GTEST_TEMPLATE_ T50 = NoneT>
+struct Templates {
+  typedef Templates50<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+      T42, T43, T44, T45, T46, T47, T48, T49, T50> type;
+};
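+
+// Illustrative sketch only (hypothetical names SampleC_ and SampleD_): the
+// 'type' member above lets a user write Templates<SampleC_, SampleD_>
+// without counting arguments; once the partial specializations below are in
+// scope, Templates<SampleC_, SampleD_>::type is Templates2<SampleC_,
+// SampleD_>, i.e. the same list one would otherwise spell out by hand as:
+template <typename T> class SampleC_ {};
+template <typename T> class SampleD_ {};
+typedef Templates2<SampleC_, SampleD_> SampleHandCountedList_;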
+
+template <>
+struct Templates<NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT> {
+  typedef Templates0 type;
+};
+template <GTEST_TEMPLATE_ T1>
+struct Templates<T1, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT> {
+  typedef Templates1<T1> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2>
+struct Templates<T1, T2, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT> {
+  typedef Templates2<T1, T2> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3>
+struct Templates<T1, T2, T3, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+  typedef Templates3<T1, T2, T3> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4>
+struct Templates<T1, T2, T3, T4, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+  typedef Templates4<T1, T2, T3, T4> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5>
+struct Templates<T1, T2, T3, T4, T5, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+  typedef Templates5<T1, T2, T3, T4, T5> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6>
+struct Templates<T1, T2, T3, T4, T5, T6, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+  typedef Templates6<T1, T2, T3, T4, T5, T6> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+  typedef Templates7<T1, T2, T3, T4, T5, T6, T7> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+  typedef Templates8<T1, T2, T3, T4, T5, T6, T7, T8> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+  typedef Templates9<T1, T2, T3, T4, T5, T6, T7, T8, T9> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+  typedef Templates10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+  typedef Templates11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+  typedef Templates12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+  typedef Templates13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+  typedef Templates14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+      T14> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+    T15, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT> {
+  typedef Templates15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+      T14, T15> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+    T15, T16, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT> {
+  typedef Templates16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+      T14, T15, T16> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+    T15, T16, T17, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT> {
+  typedef Templates17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+      T14, T15, T16, T17> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+    T15, T16, T17, T18, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT> {
+  typedef Templates18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+      T14, T15, T16, T17, T18> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+    T15, T16, T17, T18, T19, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT> {
+  typedef Templates19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+      T14, T15, T16, T17, T18, T19> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+    T15, T16, T17, T18, T19, T20, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT> {
+  typedef Templates20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+      T14, T15, T16, T17, T18, T19, T20> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+    T15, T16, T17, T18, T19, T20, T21, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT> {
+  typedef Templates21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+      T14, T15, T16, T17, T18, T19, T20, T21> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+    T15, T16, T17, T18, T19, T20, T21, T22, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT> {
+  typedef Templates22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+      T14, T15, T16, T17, T18, T19, T20, T21, T22> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+    T15, T16, T17, T18, T19, T20, T21, T22, T23, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT> {
+  typedef Templates23<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT> {
+  typedef Templates24<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT> {
+  typedef Templates25<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT> {
+  typedef Templates26<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT> {
+  typedef Templates27<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+      T27> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT> {
+  typedef Templates28<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+      T28> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT> {
+  typedef Templates29<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+      T28, T29> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+    T30, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+  typedef Templates30<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+      T28, T29, T30> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+    T30, T31, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+  typedef Templates31<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+      T28, T29, T30, T31> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+    T30, T31, T32, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+  typedef Templates32<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+      T28, T29, T30, T31, T32> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+    T30, T31, T32, T33, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+  typedef Templates33<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+      T28, T29, T30, T31, T32, T33> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+    GTEST_TEMPLATE_ T34>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+    T30, T31, T32, T33, T34, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+  typedef Templates34<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+      T28, T29, T30, T31, T32, T33, T34> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+    T30, T31, T32, T33, T34, T35, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+  typedef Templates35<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+      T28, T29, T30, T31, T32, T33, T34, T35> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+    T30, T31, T32, T33, T34, T35, T36, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+  typedef Templates36<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+      T28, T29, T30, T31, T32, T33, T34, T35, T36> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+    GTEST_TEMPLATE_ T37>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+    T30, T31, T32, T33, T34, T35, T36, T37, NoneT, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+  typedef Templates37<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+    T30, T31, T32, T33, T34, T35, T36, T37, T38, NoneT, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+  typedef Templates38<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+    T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+  typedef Templates39<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+    GTEST_TEMPLATE_ T40>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+    T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, NoneT, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+  typedef Templates40<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+    T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, NoneT, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+  typedef Templates41<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+      T41> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+    T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, NoneT,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+  typedef Templates42<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+      T42> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+    GTEST_TEMPLATE_ T43>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+    T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+  typedef Templates43<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+      T42, T43> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+    T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
+    NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+  typedef Templates44<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+      T42, T43, T44> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+    T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
+    T45, NoneT, NoneT, NoneT, NoneT, NoneT> {
+  typedef Templates45<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+      T42, T43, T44, T45> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+    GTEST_TEMPLATE_ T46>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+    T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
+    T45, T46, NoneT, NoneT, NoneT, NoneT> {
+  typedef Templates46<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+      T42, T43, T44, T45, T46> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+    GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+    T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
+    T45, T46, T47, NoneT, NoneT, NoneT> {
+  typedef Templates47<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+      T42, T43, T44, T45, T46, T47> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+    GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+    T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
+    T45, T46, T47, T48, NoneT, NoneT> {
+  typedef Templates48<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+      T42, T43, T44, T45, T46, T47, T48> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+    GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+    GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+    GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+    GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+    GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+    GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+    GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+    GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+    GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+    GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+    GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+    GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+    GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+    GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+    GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48,
+    GTEST_TEMPLATE_ T49>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+    T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+    T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
+    T45, T46, T47, T48, T49, NoneT> {
+  typedef Templates49<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+      T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+      T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+      T42, T43, T44, T45, T46, T47, T48, T49> type;
+};
+
+// The TypeList template makes it possible to use either a single type
+// or a Types<...> list in TYPED_TEST_CASE() and
+// INSTANTIATE_TYPED_TEST_CASE_P().
+
+template <typename T>
+struct TypeList {
+  typedef Types1<T> type;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42, typename T43, typename T44, typename T45,
+    typename T46, typename T47, typename T48, typename T49, typename T50>
+struct TypeList<Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+    T44, T45, T46, T47, T48, T49, T50> > {
+  typedef typename Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+      T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+      T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+      T41, T42, T43, T44, T45, T46, T47, T48, T49, T50>::type type;
+};
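+
+// Illustrative note (an added sketch, not part of the original header): both
+// forms below resolve to the same internal list type, which is what lets
+// TYPED_TEST_CASE accept either a bare type or a Types<...> list:
+//
+//   TypeList<int>::type            // Types1<int>
+//   TypeList<Types<int> >::type    // also Types1<int>, via Types<...>::type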
+
+#endif  // GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P
+
+}  // namespace internal
+}  // namespace testing
+
+#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_
+
+// Due to C++ preprocessor weirdness, we need double indirection to
+// concatenate two tokens when one of them is __LINE__.  Writing
+//
+//   foo ## __LINE__
+//
+// will result in the token foo__LINE__, instead of foo followed by
+// the current line number.  For more details, see
+// http://www.parashift.com/c++-faq-lite/misc-technical-issues.html#faq-39.6
+#define GTEST_CONCAT_TOKEN_(foo, bar) GTEST_CONCAT_TOKEN_IMPL_(foo, bar)
+#define GTEST_CONCAT_TOKEN_IMPL_(foo, bar) foo ## bar
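+
+// Illustrative sketch (added comment, not part of the original header): with
+// the two-level expansion above, the argument is expanded before pasting, so
+// on, say, line 42
+//
+//   GTEST_CONCAT_TOKEN_(foo, __LINE__)   // -> foo42
+//
+// whereas a single-level  foo ## __LINE__  would produce the literal token
+// foo__LINE__.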
+
+class ProtocolMessage;
+namespace proto2 { class Message; }
+
+namespace testing {
+
+// Forward declarations.
+
+class AssertionResult;                 // Result of an assertion.
+class Message;                         // Represents a failure message.
+class Test;                            // Represents a test.
+class TestInfo;                        // Information about a test.
+class TestPartResult;                  // Result of a test part.
+class UnitTest;                        // A collection of test cases.
+
+template <typename T>
+::std::string PrintToString(const T& value);
+
+namespace internal {
+
+struct TraceInfo;                      // Information about a trace point.
+class ScopedTrace;                     // Implements scoped trace.
+class TestInfoImpl;                    // Opaque implementation of TestInfo.
+class UnitTestImpl;                    // Opaque implementation of UnitTest.
+
+// The text used in failure messages to indicate the start of the
+// stack trace.
+GTEST_API_ extern const char kStackTraceMarker[];
+
+// Two overloaded helpers for checking at compile time whether an
+// expression is a null pointer literal (i.e. NULL or any 0-valued
+// compile-time integral constant).  Their return values have
+// different sizes, so we can use sizeof() to test which version is
+// picked by the compiler.  These helpers have no implementations, as
+// we only need their signatures.
+//
+// Given IsNullLiteralHelper(x), the compiler will pick the first
+// version if x can be implicitly converted to Secret*, and pick the
+// second version otherwise.  Since Secret is a secret and incomplete
+// type, the only expression a user can write that has type Secret* is
+// a null pointer literal.  Therefore, we know that x is a null
+// pointer literal if and only if the first version is picked by the
+// compiler.
+char IsNullLiteralHelper(Secret* p);
+char (&IsNullLiteralHelper(...))[2];  // NOLINT
+
+// A compile-time bool constant that is true if and only if x is a
+// null pointer literal (i.e. NULL or any 0-valued compile-time
+// integral constant).
+#ifdef GTEST_ELLIPSIS_NEEDS_POD_
+// We lose support for NULL detection where the compiler doesn't like
+// passing non-POD classes through ellipsis (...).
+# define GTEST_IS_NULL_LITERAL_(x) false
+#else
+# define GTEST_IS_NULL_LITERAL_(x) \
+    (sizeof(::testing::internal::IsNullLiteralHelper(x)) == 1)
+#endif  // GTEST_ELLIPSIS_NEEDS_POD_
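+
+// Illustrative sketch (added comment): which overload is selected drives the
+// sizeof test above, e.g.
+//
+//   GTEST_IS_NULL_LITERAL_(NULL)     // 0 converts to Secret*; 1-byte overload => true
+//   GTEST_IS_NULL_LITERAL_("text")   // const char* does not;  2-byte overload => false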
+
+// Appends the user-supplied message to the Google-Test-generated message.
+GTEST_API_ std::string AppendUserMessage(
+    const std::string& gtest_msg, const Message& user_msg);
+
+#if GTEST_HAS_EXCEPTIONS
+
+// This exception is thrown by (and only by) a failed Google Test
+// assertion when GTEST_FLAG(throw_on_failure) is true (if exceptions
+// are enabled).  We derive it from std::runtime_error, which is for
+// errors presumably detectable only at run time.  Since
+// std::runtime_error inherits from std::exception, many testing
+// frameworks know how to extract and print the message inside it.
+class GTEST_API_ GoogleTestFailureException : public ::std::runtime_error {
+ public:
+  explicit GoogleTestFailureException(const TestPartResult& failure);
+};
+
+#endif  // GTEST_HAS_EXCEPTIONS
+
+// A helper class for creating scoped traces in user programs.
+class GTEST_API_ ScopedTrace {
+ public:
+  // The c'tor pushes the given source file location and message onto
+  // a trace stack maintained by Google Test.
+  ScopedTrace(const char* file, int line, const Message& message);
+
+  // The d'tor pops the info pushed by the c'tor.
+  //
+  // Note that the d'tor is not virtual in order to be efficient.
+  // Don't inherit from ScopedTrace!
+  ~ScopedTrace();
+
+ private:
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedTrace);
+} GTEST_ATTRIBUTE_UNUSED_;  // A ScopedTrace object does its job in its
+                            // c'tor and d'tor.  Therefore it doesn't
+                            // need to be used otherwise.
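+
+// Illustrative usage note (added): user code normally reaches ScopedTrace
+// through the SCOPED_TRACE macro rather than constructing it directly, e.g.
+//
+//   {
+//     SCOPED_TRACE("checking lower bound");  // pushes file/line + message
+//     EXPECT_GE(value, 0);                   // a failure here includes the trace
+//   }                                        // trace popped by the d'tor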
+
+namespace edit_distance {
+// Returns the optimal edits to go from 'left' to 'right'.
+// All edits cost the same, with replace having lower priority than
+// add/remove.
+// Simple implementation of the Wagner–Fischer algorithm.
+// See http://en.wikipedia.org/wiki/Wagner-Fischer_algorithm
+enum EditType { kMatch, kAdd, kRemove, kReplace };
+GTEST_API_ std::vector<EditType> CalculateOptimalEdits(
+    const std::vector<size_t>& left, const std::vector<size_t>& right);
+
+// Same as above, but the input is represented as strings.
+GTEST_API_ std::vector<EditType> CalculateOptimalEdits(
+    const std::vector<std::string>& left,
+    const std::vector<std::string>& right);
+
+// Create a diff of the input strings in Unified diff format.
+GTEST_API_ std::string CreateUnifiedDiff(const std::vector<std::string>& left,
+                                         const std::vector<std::string>& right,
+                                         size_t context = 2);
+
+}  // namespace edit_distance
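+
+// Illustrative sketch (added comment; the inputs are hypothetical) of how the
+// edit_distance helpers above might be exercised:
+//
+//   std::vector<std::string> left, right;
+//   left.push_back("a");  left.push_back("b");
+//   right.push_back("a"); right.push_back("c");
+//   const std::string diff = edit_distance::CreateUnifiedDiff(left, right);
+//   // diff now holds a unified-format hunk replacing "b" with "c".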
+
+// Calculate the diff between 'left' and 'right' and return it in unified diff
+// format.
+// If not null, stores in 'total_line_count' the total number of lines found
+// in left + right.
+GTEST_API_ std::string DiffStrings(const std::string& left,
+                                   const std::string& right,
+                                   size_t* total_line_count);
+
+// Constructs and returns the message for an equality assertion
+// (e.g. ASSERT_EQ, EXPECT_STREQ, etc) failure.
+//
+// The first four parameters are the expressions used in the assertion
+// and their values, as strings.  For example, for ASSERT_EQ(foo, bar)
+// where foo is 5 and bar is 6, we have:
+//
+//   expected_expression: "foo"
+//   actual_expression:   "bar"
+//   expected_value:      "5"
+//   actual_value:        "6"
+//
+// The ignoring_case parameter is true iff the assertion is a
+// *_STRCASEEQ*.  When it's true, the string " (ignoring case)" will
+// be inserted into the message.
+GTEST_API_ AssertionResult EqFailure(const char* expected_expression,
+                                     const char* actual_expression,
+                                     const std::string& expected_value,
+                                     const std::string& actual_value,
+                                     bool ignoring_case);
+
+// Constructs a failure message for Boolean assertions such as EXPECT_TRUE.
+GTEST_API_ std::string GetBoolAssertionFailureMessage(
+    const AssertionResult& assertion_result,
+    const char* expression_text,
+    const char* actual_predicate_value,
+    const char* expected_predicate_value);
+
+// This template class represents an IEEE floating-point number
+// (either single-precision or double-precision, depending on the
+// template parameters).
+//
+// The purpose of this class is to do more sophisticated number
+// comparison.  (Due to round-off error, etc., it's very unlikely that
+// two floating-point numbers will be exactly equal.  Hence a naive
+// comparison with the == operator often doesn't work.)
+//
+// Format of IEEE floating-point:
+//
+//   The most-significant bit being the leftmost, an IEEE
+//   floating-point looks like
+//
+//     sign_bit exponent_bits fraction_bits
+//
+//   Here, sign_bit is a single bit that designates the sign of the
+//   number.
+//
+//   For float, there are 8 exponent bits and 23 fraction bits.
+//
+//   For double, there are 11 exponent bits and 52 fraction bits.
+//
+//   More details can be found at
+//   http://en.wikipedia.org/wiki/IEEE_floating-point_standard.
+//
+// Template parameter:
+//
+//   RawType: the raw floating-point type (either float or double)
+template <typename RawType>
+class FloatingPoint {
+ public:
+  // Defines the unsigned integer type that has the same size as the
+  // floating point number.
+  typedef typename TypeWithSize<sizeof(RawType)>::UInt Bits;
+
+  // Constants.
+
+  // # of bits in a number.
+  static const size_t kBitCount = 8*sizeof(RawType);
+
+  // # of fraction bits in a number.
+  static const size_t kFractionBitCount =
+    std::numeric_limits<RawType>::digits - 1;
+
+  // # of exponent bits in a number.
+  static const size_t kExponentBitCount = kBitCount - 1 - kFractionBitCount;
+
+  // The mask for the sign bit.
+  static const Bits kSignBitMask = static_cast<Bits>(1) << (kBitCount - 1);
+
+  // The mask for the fraction bits.
+  static const Bits kFractionBitMask =
+    ~static_cast<Bits>(0) >> (kExponentBitCount + 1);
+
+  // The mask for the exponent bits.
+  static const Bits kExponentBitMask = ~(kSignBitMask | kFractionBitMask);
+
+  // How many ULP's (Units in the Last Place) we want to tolerate when
+  // comparing two numbers.  The larger the value, the more error we
+  // allow.  A 0 value means that two numbers must be exactly the same
+  // to be considered equal.
+  //
+  // The maximum error of a single floating-point operation is 0.5
+  // units in the last place.  On Intel CPU's, x87 floating-point
+  // calculations are done with 80-bit precision, while double has 64
+  // bits.  Therefore, 4 should be enough for ordinary use.
+  //
+  // See the following article for more details on ULP:
+  // http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
+  static const size_t kMaxUlps = 4;
+
+  // Constructs a FloatingPoint from a raw floating-point number.
+  //
+  // On an Intel CPU, passing a non-normalized NAN (Not a Number)
+  // around may change its bits, although the new value is guaranteed
+  // to be also a NAN.  Therefore, don't expect this constructor to
+  // preserve the bits in x when x is a NAN.
+  explicit FloatingPoint(const RawType& x) { u_.value_ = x; }
+
+  // Static methods
+
+  // Reinterprets a bit pattern as a floating-point number.
+  //
+  // This function is needed to test the AlmostEquals() method.
+  static RawType ReinterpretBits(const Bits bits) {
+    FloatingPoint fp(0);
+    fp.u_.bits_ = bits;
+    return fp.u_.value_;
+  }
+
+  // Returns the floating-point number that represents positive infinity.
+  static RawType Infinity() {
+    return ReinterpretBits(kExponentBitMask);
+  }
+
+  // Returns the maximum representable finite floating-point number.
+  static RawType Max();
+
+  // Non-static methods
+
+  // Returns the bits that represent this number.
+  const Bits &bits() const { return u_.bits_; }
+
+  // Returns the exponent bits of this number.
+  Bits exponent_bits() const { return kExponentBitMask & u_.bits_; }
+
+  // Returns the fraction bits of this number.
+  Bits fraction_bits() const { return kFractionBitMask & u_.bits_; }
+
+  // Returns the sign bit of this number.
+  Bits sign_bit() const { return kSignBitMask & u_.bits_; }
+
+  // Returns true iff this is NAN (not a number).
+  bool is_nan() const {
+    // It's a NAN if the exponent bits are all ones and the fraction
+    // bits are not entirely zeros.
+    return (exponent_bits() == kExponentBitMask) && (fraction_bits() != 0);
+  }
+
+  // Returns true iff this number is at most kMaxUlps ULP's away from
+  // rhs.  In particular, this function:
+  //
+  //   - returns false if either number is (or both are) NAN.
+  //   - treats really large numbers as almost equal to infinity.
+  //   - thinks +0.0 and -0.0 are 0 ULP's apart.
+  bool AlmostEquals(const FloatingPoint& rhs) const {
+    // The IEEE standard says that any comparison operation involving
+    // a NAN must return false.
+    if (is_nan() || rhs.is_nan()) return false;
+
+    return DistanceBetweenSignAndMagnitudeNumbers(u_.bits_, rhs.u_.bits_)
+        <= kMaxUlps;
+  }
+
+ private:
+  // The data type used to store the actual floating-point number.
+  union FloatingPointUnion {
+    RawType value_;  // The raw floating-point number.
+    Bits bits_;      // The bits that represent the number.
+  };
+
+  // Converts an integer from the sign-and-magnitude representation to
+  // the biased representation.  More precisely, let N be 2 to the
+  // power of (kBitCount - 1), an integer x is represented by the
+  // unsigned number x + N.
+  //
+  // For instance,
+  //
+  //   -N + 1 (the most negative number representable using
+  //          sign-and-magnitude) is represented by 1;
+  //   0      is represented by N; and
+  //   N - 1  (the biggest number representable using
+  //          sign-and-magnitude) is represented by 2N - 1.
+  //
+  // Read http://en.wikipedia.org/wiki/Signed_number_representations
+  // for more details on signed number representations.
+  static Bits SignAndMagnitudeToBiased(const Bits &sam) {
+    if (kSignBitMask & sam) {
+      // sam represents a negative number.
+      return ~sam + 1;
+    } else {
+      // sam represents a positive number.
+      return kSignBitMask | sam;
+    }
+  }
+
+  // Given two numbers in the sign-and-magnitude representation,
+  // returns the distance between them as an unsigned number.
+  static Bits DistanceBetweenSignAndMagnitudeNumbers(const Bits &sam1,
+                                                     const Bits &sam2) {
+    const Bits biased1 = SignAndMagnitudeToBiased(sam1);
+    const Bits biased2 = SignAndMagnitudeToBiased(sam2);
+    return (biased1 >= biased2) ? (biased1 - biased2) : (biased2 - biased1);
+  }
+
+  FloatingPointUnion u_;
+};
+
+// We cannot use std::numeric_limits<T>::max() as it clashes with the max()
+// macro defined by <windows.h>.
+template <>
+inline float FloatingPoint<float>::Max() { return FLT_MAX; }
+template <>
+inline double FloatingPoint<double>::Max() { return DBL_MAX; }
+
+// Typedefs the instances of the FloatingPoint template class that we
+// care to use.
+typedef FloatingPoint<float> Float;
+typedef FloatingPoint<double> Double;
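+
+// A minimal usage sketch of the ULP-based comparison (the values below are
+// illustrative only, not part of the API documentation):
+//
+//   const Double d1(2.0);
+//   const Double d2(2.0 + 4.0 * DBL_EPSILON);     // 2 ULPs above 2.0.
+//   d1.AlmostEquals(d2);                          // true: within kMaxUlps.
+//   Double(1.0).AlmostEquals(Double(1.0001));     // false: many ULPs apart.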
+
+// In order to catch the mistake of putting tests that use different
+// test fixture classes in the same test case, we need to assign
+// unique IDs to fixture classes and compare them.  The TypeId type is
+// used to hold such IDs.  The user should treat TypeId as an opaque
+// type: the only operation allowed on TypeId values is to compare
+// them for equality using the == operator.
+typedef const void* TypeId;
+
+template <typename T>
+class TypeIdHelper {
+ public:
+  // dummy_ must not have a const type.  Otherwise an overly eager
+  // compiler (e.g. MSVC 7.1 & 8.0) may try to merge
+  // TypeIdHelper<T>::dummy_ for different Ts as an "optimization".
+  static bool dummy_;
+};
+
+template <typename T>
+bool TypeIdHelper<T>::dummy_ = false;
+
+// GetTypeId<T>() returns the ID of type T.  Different values will be
+// returned for different types.  Calling the function twice with the
+// same type argument is guaranteed to return the same ID.
+template <typename T>
+TypeId GetTypeId() {
+  // The compiler is required to allocate a different
+  // TypeIdHelper<T>::dummy_ variable for each T used to instantiate
+  // the template.  Therefore, the address of dummy_ is guaranteed to
+  // be unique.
+  return &(TypeIdHelper<T>::dummy_);
+}
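+
+// For instance (illustrative):
+//
+//   const TypeId int_id = GetTypeId<int>();
+//   // int_id == GetTypeId<int>() always holds, and
+//   // int_id != GetTypeId<double>().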
+
+// Returns the type ID of ::testing::Test.  Always call this instead
+// of GetTypeId< ::testing::Test>() to get the type ID of
+// ::testing::Test, as the latter may give the wrong result due to a
+// suspected linker bug when compiling Google Test as a Mac OS X
+// framework.
+GTEST_API_ TypeId GetTestTypeId();
+
+// Defines the abstract factory interface that creates instances
+// of a Test object.
+class TestFactoryBase {
+ public:
+  virtual ~TestFactoryBase() {}
+
+  // Creates a test instance to run. The instance is both created and destroyed
+  // within TestInfoImpl::Run()
+  virtual Test* CreateTest() = 0;
+
+ protected:
+  TestFactoryBase() {}
+
+ private:
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(TestFactoryBase);
+};
+
+// This class provides an implementation of the TestFactoryBase interface.
+// It is used in TEST and TEST_F macros.
+template <class TestClass>
+class TestFactoryImpl : public TestFactoryBase {
+ public:
+  virtual Test* CreateTest() { return new TestClass; }
+};
+
+#if GTEST_OS_WINDOWS
+
+// Predicate-formatters for implementing the HRESULT checking macros
+// {ASSERT|EXPECT}_HRESULT_{SUCCEEDED|FAILED}
+// We pass a long instead of HRESULT to avoid causing an
+// include dependency for the HRESULT type.
+GTEST_API_ AssertionResult IsHRESULTSuccess(const char* expr,
+                                            long hr);  // NOLINT
+GTEST_API_ AssertionResult IsHRESULTFailure(const char* expr,
+                                            long hr);  // NOLINT
+
+#endif  // GTEST_OS_WINDOWS
+
+// Types of SetUpTestCase() and TearDownTestCase() functions.
+typedef void (*SetUpTestCaseFunc)();
+typedef void (*TearDownTestCaseFunc)();
+
+struct CodeLocation {
+  CodeLocation(const string& a_file, int a_line) : file(a_file), line(a_line) {}
+
+  string file;
+  int line;
+};
+
+// Creates a new TestInfo object and registers it with Google Test;
+// returns the created object.
+//
+// Arguments:
+//
+//   test_case_name:   name of the test case
+//   name:             name of the test
+//   type_param        the name of the test's type parameter, or NULL if
+//                     this is not a typed or a type-parameterized test.
+//   value_param       text representation of the test's value parameter,
+//                     or NULL if this is not a type-parameterized test.
+//   code_location:    code location where the test is defined
+//   fixture_class_id: ID of the test fixture class
+//   set_up_tc:        pointer to the function that sets up the test case
+//   tear_down_tc:     pointer to the function that tears down the test case
+//   factory:          pointer to the factory that creates a test object.
+//                     The newly created TestInfo instance will assume
+//                     ownership of the factory object.
+GTEST_API_ TestInfo* MakeAndRegisterTestInfo(
+    const char* test_case_name,
+    const char* name,
+    const char* type_param,
+    const char* value_param,
+    CodeLocation code_location,
+    TypeId fixture_class_id,
+    SetUpTestCaseFunc set_up_tc,
+    TearDownTestCaseFunc tear_down_tc,
+    TestFactoryBase* factory);
+
+// If *pstr starts with the given prefix, modifies *pstr to be right
+// past the prefix and returns true; otherwise leaves *pstr unchanged
+// and returns false.  None of pstr, *pstr, and prefix can be NULL.
+GTEST_API_ bool SkipPrefix(const char* prefix, const char** pstr);
+
+#if GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P
+
+// State of the definition of a type-parameterized test case.
+class GTEST_API_ TypedTestCasePState {
+ public:
+  TypedTestCasePState() : registered_(false) {}
+
+  // Adds the given test name to registered_tests_ and returns true
+  // if the test case hasn't been registered; otherwise aborts the
+  // program.
+  bool AddTestName(const char* file, int line, const char* case_name,
+                   const char* test_name) {
+    if (registered_) {
+      fprintf(stderr, "%s Test %s must be defined before "
+              "REGISTER_TYPED_TEST_CASE_P(%s, ...).\n",
+              FormatFileLocation(file, line).c_str(), test_name, case_name);
+      fflush(stderr);
+      posix::Abort();
+    }
+    registered_tests_.insert(
+        ::std::make_pair(test_name, CodeLocation(file, line)));
+    return true;
+  }
+
+  bool TestExists(const std::string& test_name) const {
+    return registered_tests_.count(test_name) > 0;
+  }
+
+  const CodeLocation& GetCodeLocation(const std::string& test_name) const {
+    RegisteredTestsMap::const_iterator it = registered_tests_.find(test_name);
+    GTEST_CHECK_(it != registered_tests_.end());
+    return it->second;
+  }
+
+  // Verifies that registered_tests match the test names added via
+  // AddTestName(); returns registered_tests if successful, or
+  // aborts the program otherwise.
+  const char* VerifyRegisteredTestNames(
+      const char* file, int line, const char* registered_tests);
+
+ private:
+  typedef ::std::map<std::string, CodeLocation> RegisteredTestsMap;
+
+  bool registered_;
+  RegisteredTestsMap registered_tests_;
+};
+
+// Skips to the first non-space char after the first comma in 'str';
+// returns NULL if no comma is found in 'str'.
+inline const char* SkipComma(const char* str) {
+  const char* comma = strchr(str, ',');
+  if (comma == NULL) {
+    return NULL;
+  }
+  while (IsSpace(*(++comma))) {}
+  return comma;
+}
+
+// Returns the prefix of 'str' before the first comma in it; returns
+// the entire string if it contains no comma.
+inline std::string GetPrefixUntilComma(const char* str) {
+  const char* comma = strchr(str, ',');
+  return comma == NULL ? str : std::string(str, comma);
+}
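+
+// For example (illustrative inputs):
+//
+//   SkipComma("TestA, TestB, TestC")     points at "TestB, TestC";
+//   SkipComma("TestA")                   returns NULL;
+//   GetPrefixUntilComma("TestA, TestB")  == "TestA";
+//   GetPrefixUntilComma("TestA")         == "TestA".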
+
+// Splits a given string on a given delimiter, populating a given
+// vector with the fields.
+void SplitString(const ::std::string& str, char delimiter,
+                 ::std::vector< ::std::string>* dest);
+
+// TypeParameterizedTest<Fixture, TestSel, Types>::Register()
+// registers a list of type-parameterized tests with Google Test.  The
+// return value is insignificant - we just need to return something
+// such that we can call this function in a namespace scope.
+//
+// Implementation note: The GTEST_TEMPLATE_ macro declares a template
+// template parameter.  It's defined in gtest-type-util.h.
+template <GTEST_TEMPLATE_ Fixture, class TestSel, typename Types>
+class TypeParameterizedTest {
+ public:
+  // 'index' is the index of the test in the type list 'Types'
+  // specified in INSTANTIATE_TYPED_TEST_CASE_P(Prefix, TestCase,
+  // Types).  Valid values for 'index' are [0, N - 1] where N is the
+  // length of Types.
+  static bool Register(const char* prefix,
+                       CodeLocation code_location,
+                       const char* case_name, const char* test_names,
+                       int index) {
+    typedef typename Types::Head Type;
+    typedef Fixture<Type> FixtureClass;
+    typedef typename GTEST_BIND_(TestSel, Type) TestClass;
+
+    // First, registers the first type-parameterized test in the type
+    // list.
+    MakeAndRegisterTestInfo(
+        (std::string(prefix) + (prefix[0] == '\0' ? "" : "/") + case_name + "/"
+         + StreamableToString(index)).c_str(),
+        StripTrailingSpaces(GetPrefixUntilComma(test_names)).c_str(),
+        GetTypeName<Type>().c_str(),
+        NULL,  // No value parameter.
+        code_location,
+        GetTypeId<FixtureClass>(),
+        TestClass::SetUpTestCase,
+        TestClass::TearDownTestCase,
+        new TestFactoryImpl<TestClass>);
+
+    // Next, recurses (at compile time) with the tail of the type list.
+    return TypeParameterizedTest<Fixture, TestSel, typename Types::Tail>
+        ::Register(prefix, code_location, case_name, test_names, index + 1);
+  }
+};
+
+// The base case for the compile time recursion.
+template <GTEST_TEMPLATE_ Fixture, class TestSel>
+class TypeParameterizedTest<Fixture, TestSel, Types0> {
+ public:
+  static bool Register(const char* /*prefix*/, CodeLocation,
+                       const char* /*case_name*/, const char* /*test_names*/,
+                       int /*index*/) {
+    return true;
+  }
+};
+
+// TypeParameterizedTestCase<Fixture, Tests, Types>::Register()
+// registers *all combinations* of 'Tests' and 'Types' with Google
+// Test.  The return value is insignificant - we just need to return
+// something such that we can call this function in a namespace scope.
+template <GTEST_TEMPLATE_ Fixture, typename Tests, typename Types>
+class TypeParameterizedTestCase {
+ public:
+  static bool Register(const char* prefix, CodeLocation code_location,
+                       const TypedTestCasePState* state,
+                       const char* case_name, const char* test_names) {
+    std::string test_name = StripTrailingSpaces(
+        GetPrefixUntilComma(test_names));
+    if (!state->TestExists(test_name)) {
+      fprintf(stderr, "Failed to get code location for test %s.%s at %s.",
+              case_name, test_name.c_str(),
+              FormatFileLocation(code_location.file.c_str(),
+                                 code_location.line).c_str());
+      fflush(stderr);
+      posix::Abort();
+    }
+    const CodeLocation& test_location = state->GetCodeLocation(test_name);
+
+    typedef typename Tests::Head Head;
+
+    // First, register the first test in 'Test' for each type in 'Types'.
+    TypeParameterizedTest<Fixture, Head, Types>::Register(
+        prefix, test_location, case_name, test_names, 0);
+
+    // Next, recurses (at compile time) with the tail of the test list.
+    return TypeParameterizedTestCase<Fixture, typename Tests::Tail, Types>
+        ::Register(prefix, code_location, state,
+                   case_name, SkipComma(test_names));
+  }
+};
+
+// The base case for the compile time recursion.
+template <GTEST_TEMPLATE_ Fixture, typename Types>
+class TypeParameterizedTestCase<Fixture, Templates0, Types> {
+ public:
+  static bool Register(const char* /*prefix*/, CodeLocation,
+                       const TypedTestCasePState* /*state*/,
+                       const char* /*case_name*/, const char* /*test_names*/) {
+    return true;
+  }
+};
+
+#endif  // GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P
+
+// Returns the current OS stack trace as an std::string.
+//
+// The maximum number of stack frames to be included is specified by
+// the gtest_stack_trace_depth flag.  The skip_count parameter
+// specifies the number of top frames to be skipped, which doesn't
+// count against the number of frames to be included.
+//
+// For example, if Foo() calls Bar(), which in turn calls
+// GetCurrentOsStackTraceExceptTop(..., 1), Foo() will be included in
+// the trace but Bar() and GetCurrentOsStackTraceExceptTop() won't.
+GTEST_API_ std::string GetCurrentOsStackTraceExceptTop(
+    UnitTest* unit_test, int skip_count);
+
+// Helpers for suppressing warnings on unreachable code or constant
+// condition.
+
+// Always returns true.
+GTEST_API_ bool AlwaysTrue();
+
+// Always returns false.
+inline bool AlwaysFalse() { return !AlwaysTrue(); }
+
+// Helper for suppressing false warning from Clang on a const char*
+// variable declared in a conditional expression always being NULL in
+// the else branch.
+struct GTEST_API_ ConstCharPtr {
+  ConstCharPtr(const char* str) : value(str) {}
+  operator bool() const { return true; }
+  const char* value;
+};
+
+// A simple Linear Congruential Generator for generating random
+// numbers with a uniform distribution.  Unlike rand() and srand(), it
+// doesn't use global state (and therefore can't interfere with user
+// code).  Unlike rand_r(), it's portable.  An LCG isn't very random,
+// but it's good enough for our purposes.
+class GTEST_API_ Random {
+ public:
+  static const UInt32 kMaxRange = 1u << 31;
+
+  explicit Random(UInt32 seed) : state_(seed) {}
+
+  void Reseed(UInt32 seed) { state_ = seed; }
+
+  // Generates a random number from [0, range).  Crashes if 'range' is
+  // 0 or greater than kMaxRange.
+  UInt32 Generate(UInt32 range);
+
+ private:
+  UInt32 state_;
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(Random);
+};
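+
+// A usage sketch (the seed and range below are arbitrary):
+//
+//   Random rng(42);
+//   const UInt32 die = rng.Generate(6) + 1;  // Uniformly distributed in [1, 6].
+//   rng.Reseed(42);                          // Restarts the same sequence.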
+
+// Defining a variable of type CompileAssertTypesEqual<T1, T2> will cause a
+// compiler error iff T1 and T2 are different types.
+template <typename T1, typename T2>
+struct CompileAssertTypesEqual;
+
+template <typename T>
+struct CompileAssertTypesEqual<T, T> {
+};
+
+// Removes the reference from a type if it is a reference type,
+// otherwise leaves it unchanged.  This is the same as
+// tr1::remove_reference, which is not widely available yet.
+template <typename T>
+struct RemoveReference { typedef T type; };  // NOLINT
+template <typename T>
+struct RemoveReference<T&> { typedef T type; };  // NOLINT
+
+// A handy wrapper around RemoveReference that works when the argument
+// T depends on template parameters.
+#define GTEST_REMOVE_REFERENCE_(T) \
+    typename ::testing::internal::RemoveReference<T>::type
+
+// Removes const from a type if it is a const type, otherwise leaves
+// it unchanged.  This is the same as tr1::remove_const, which is not
+// widely available yet.
+template <typename T>
+struct RemoveConst { typedef T type; };  // NOLINT
+template <typename T>
+struct RemoveConst<const T> { typedef T type; };  // NOLINT
+
+// MSVC 8.0, Sun C++, and IBM XL C++ have a bug which causes the above
+// definition to fail to remove the const in 'const int[3]' and 'const
+// char[3][4]'.  The following specialization works around the bug.
+template <typename T, size_t N>
+struct RemoveConst<const T[N]> {
+  typedef typename RemoveConst<T>::type type[N];
+};
+
+#if defined(_MSC_VER) && _MSC_VER < 1400
+// This is the only specialization that allows VC++ 7.1 to remove const in
+// 'const int[3]' and 'const int[3][4]'.  However, it causes trouble with GCC
+// and thus needs to be conditionally compiled.
+template <typename T, size_t N>
+struct RemoveConst<T[N]> {
+  typedef typename RemoveConst<T>::type type[N];
+};
+#endif
+
+// A handy wrapper around RemoveConst that works when the argument
+// T depends on template parameters.
+#define GTEST_REMOVE_CONST_(T) \
+    typename ::testing::internal::RemoveConst<T>::type
+
+// Turns const U&, U&, const U, and U all into U.
+#define GTEST_REMOVE_REFERENCE_AND_CONST_(T) \
+    GTEST_REMOVE_CONST_(GTEST_REMOVE_REFERENCE_(T))
+
+// Adds reference to a type if it is not a reference type,
+// otherwise leaves it unchanged.  This is the same as
+// tr1::add_reference, which is not widely available yet.
+template <typename T>
+struct AddReference { typedef T& type; };  // NOLINT
+template <typename T>
+struct AddReference<T&> { typedef T& type; };  // NOLINT
+
+// A handy wrapper around AddReference that works when the argument T
+// depends on template parameters.
+#define GTEST_ADD_REFERENCE_(T) \
+    typename ::testing::internal::AddReference<T>::type
+
+// Adds a reference to const on top of T as necessary.  For example,
+// it transforms
+//
+//   char         ==> const char&
+//   const char   ==> const char&
+//   char&        ==> const char&
+//   const char&  ==> const char&
+//
+// The argument T must depend on some template parameters.
+#define GTEST_REFERENCE_TO_CONST_(T) \
+    GTEST_ADD_REFERENCE_(const GTEST_REMOVE_REFERENCE_(T))
+
+// ImplicitlyConvertible<From, To>::value is a compile-time bool
+// constant that's true iff type From can be implicitly converted to
+// type To.
+template <typename From, typename To>
+class ImplicitlyConvertible {
+ private:
+  // We need the following helper functions only for their types.
+  // They have no implementations.
+
+  // MakeFrom() is an expression whose type is From.  We cannot simply
+  // use From(), as the type From may not have a public default
+  // constructor.
+  static typename AddReference<From>::type MakeFrom();
+
+  // These two functions are overloaded.  Given an expression
+  // Helper(x), the compiler will pick the first version if x can be
+  // implicitly converted to type To; otherwise it will pick the
+  // second version.
+  //
+  // The first version returns a value of size 1, and the second
+  // version returns a value of size 2.  Therefore, by checking the
+  // size of Helper(x), which can be done at compile time, we can tell
+  // which version of Helper() is used, and hence whether x can be
+  // implicitly converted to type To.
+  static char Helper(To);
+  static char (&Helper(...))[2];  // NOLINT
+
+  // We have to put the 'public' section after the 'private' section,
+  // or MSVC refuses to compile the code.
+ public:
+#if defined(__BORLANDC__)
+  // C++Builder cannot use member overload resolution during template
+  // instantiation.  The simplest workaround is to use its C++0x type traits
+  // functions (C++Builder 2009 and above only).
+  static const bool value = __is_convertible(From, To);
+#else
+  // MSVC warns about implicitly converting from double to int for
+  // possible loss of data, so we need to temporarily disable the
+  // warning.
+  GTEST_DISABLE_MSC_WARNINGS_PUSH_(4244)
+  static const bool value =
+      sizeof(Helper(ImplicitlyConvertible::MakeFrom())) == 1;
+  GTEST_DISABLE_MSC_WARNINGS_POP_()
+#endif  // __BORLANDC__
+};
+template <typename From, typename To>
+const bool ImplicitlyConvertible<From, To>::value;
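+
+// For instance (illustrative):
+//
+//   ImplicitlyConvertible<int, double>::value       // true
+//   ImplicitlyConvertible<double*, void*>::value    // true
+//   ImplicitlyConvertible<const char*, int>::value  // false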
+
+// IsAProtocolMessage<T>::value is a compile-time bool constant that's
+// true iff T is type ProtocolMessage, proto2::Message, or a subclass
+// of those.
+template <typename T>
+struct IsAProtocolMessage
+    : public bool_constant<
+  ImplicitlyConvertible<const T*, const ::ProtocolMessage*>::value ||
+  ImplicitlyConvertible<const T*, const ::proto2::Message*>::value> {
+};
+
+// When the compiler sees expression IsContainerTest<C>(0), if C is an
+// STL-style container class, the first overload of IsContainerTest
+// will be viable (since both C::iterator* and C::const_iterator* are
+// valid types and NULL can be implicitly converted to them).  It will
+// be picked over the second overload as 'int' is a perfect match for
+// the type of argument 0.  If C::iterator or C::const_iterator is not
+// a valid type, the first overload is not viable, and the second
+// overload will be picked.  Therefore, we can determine whether C is
+// a container class by checking the type of IsContainerTest<C>(0).
+// The value of the expression is insignificant.
+//
+// Note that we look for both C::iterator and C::const_iterator.  The
+// reason is that C++ injects the name of a class as a member of the
+// class itself (e.g. you can refer to class iterator as either
+// 'iterator' or 'iterator::iterator').  If we look for C::iterator
+// only, for example, we would mistakenly think that a class named
+// iterator is an STL container.
+//
+// Also note that the simpler approach of overloading
+// IsContainerTest(typename C::const_iterator*) and
+// IsContainerTest(...) doesn't work with Visual Age C++ and Sun C++.
+typedef int IsContainer;
+template <class C>
+IsContainer IsContainerTest(int /* dummy */,
+                            typename C::iterator* /* it */ = NULL,
+                            typename C::const_iterator* /* const_it */ = NULL) {
+  return 0;
+}
+
+typedef char IsNotContainer;
+template <class C>
+IsNotContainer IsContainerTest(long /* dummy */) { return '\0'; }
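+
+// A sketch of the intended compile-time check (illustrative; C stands for the
+// class under inspection):
+//
+//   const bool c_is_container =
+//       sizeof(IsContainerTest<C>(0)) == sizeof(IsContainer);
+//   // true when C is e.g. std::vector<int>; false when C is e.g. int.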
+
+// EnableIf<condition>::type is void when 'Cond' is true, and
+// undefined when 'Cond' is false.  To use SFINAE to make a function
+// overload only apply when a particular expression is true, add
+// "typename EnableIf<expression>::type* = 0" as the last parameter.
+template<bool> struct EnableIf;
+template<> struct EnableIf<true> { typedef void type; };  // NOLINT
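+
+// A sketch of the intended SFINAE usage (StoreSmall and its size condition
+// are hypothetical):
+//
+//   template <typename T>
+//   void StoreSmall(T value, typename EnableIf<(sizeof(T) <= 8)>::type* = 0) {
+//     // Participates in overload resolution only when sizeof(T) <= 8.
+//   }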
+
+// Utilities for native arrays.
+
+// ArrayEq() compares two k-dimensional native arrays using the
+// elements' operator==, where k can be any integer >= 0.  When k is
+// 0, ArrayEq() degenerates into comparing a single pair of values.
+
+template <typename T, typename U>
+bool ArrayEq(const T* lhs, size_t size, const U* rhs);
+
+// This generic version is used when k is 0.
+template <typename T, typename U>
+inline bool ArrayEq(const T& lhs, const U& rhs) { return lhs == rhs; }
+
+// This overload is used when k >= 1.
+template <typename T, typename U, size_t N>
+inline bool ArrayEq(const T(&lhs)[N], const U(&rhs)[N]) {
+  return internal::ArrayEq(lhs, N, rhs);
+}
+
+// This helper reduces code bloat.  If we instead put its logic inside
+// the previous ArrayEq() function, arrays with different sizes would
+// lead to different copies of the template code.
+template <typename T, typename U>
+bool ArrayEq(const T* lhs, size_t size, const U* rhs) {
+  for (size_t i = 0; i != size; i++) {
+    if (!internal::ArrayEq(lhs[i], rhs[i]))
+      return false;
+  }
+  return true;
+}
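+
+// For example (illustrative):
+//
+//   const int a[] = { 1, 2, 3 };
+//   const int b[] = { 1, 2, 3 };
+//   const int c[][2] = { { 1, 2 }, { 3, 4 } };
+//   const int d[][2] = { { 1, 2 }, { 3, 5 } };
+//   ArrayEq(a, b);   // true  - element-wise equal (k == 1).
+//   ArrayEq(c, d);   // false - the last elements differ (k == 2).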
+
+// Finds the first element in the iterator range [begin, end) that
+// equals elem.  Element may be a native array type itself.
+template <typename Iter, typename Element>
+Iter ArrayAwareFind(Iter begin, Iter end, const Element& elem) {
+  for (Iter it = begin; it != end; ++it) {
+    if (internal::ArrayEq(*it, elem))
+      return it;
+  }
+  return end;
+}
+
+// CopyArray() copies a k-dimensional native array using the elements'
+// operator=, where k can be any integer >= 0.  When k is 0,
+// CopyArray() degenerates into copying a single value.
+
+template <typename T, typename U>
+void CopyArray(const T* from, size_t size, U* to);
+
+// This generic version is used when k is 0.
+template <typename T, typename U>
+inline void CopyArray(const T& from, U* to) { *to = from; }
+
+// This overload is used when k >= 1.
+template <typename T, typename U, size_t N>
+inline void CopyArray(const T(&from)[N], U(*to)[N]) {
+  internal::CopyArray(from, N, *to);
+}
+
+// This helper reduces code bloat.  If we instead put its logic inside
+// the previous CopyArray() function, arrays with different sizes
+// would lead to different copies of the template code.
+template <typename T, typename U>
+void CopyArray(const T* from, size_t size, U* to) {
+  for (size_t i = 0; i != size; i++) {
+    internal::CopyArray(from[i], to + i);
+  }
+}
+
+// The relation between a NativeArray object (see below) and the
+// native array it represents.
+// We use 2 different structs to allow non-copyable types to be used, as long
+// as RelationToSourceReference() is passed.
+struct RelationToSourceReference {};
+struct RelationToSourceCopy {};
+
+// Adapts a native array to a read-only STL-style container.  Instead
+// of the complete STL container concept, this adaptor only implements
+// members useful for Google Mock's container matchers.  New members
+// should be added as needed.  To simplify the implementation, we only
+// support Element being a raw type (i.e. having no top-level const or
+// reference modifier).  It's the client's responsibility to satisfy
+// this requirement.  Element can be an array type itself (hence
+// multi-dimensional arrays are supported).
+template <typename Element>
+class NativeArray {
+ public:
+  // STL-style container typedefs.
+  typedef Element value_type;
+  typedef Element* iterator;
+  typedef const Element* const_iterator;
+
+  // Constructs from a native array. References the source.
+  NativeArray(const Element* array, size_t count, RelationToSourceReference) {
+    InitRef(array, count);
+  }
+
+  // Constructs from a native array. Copies the source.
+  NativeArray(const Element* array, size_t count, RelationToSourceCopy) {
+    InitCopy(array, count);
+  }
+
+  // Copy constructor.
+  NativeArray(const NativeArray& rhs) {
+    (this->*rhs.clone_)(rhs.array_, rhs.size_);
+  }
+
+  ~NativeArray() {
+    if (clone_ != &NativeArray::InitRef)
+      delete[] array_;
+  }
+
+  // STL-style container methods.
+  size_t size() const { return size_; }
+  const_iterator begin() const { return array_; }
+  const_iterator end() const { return array_ + size_; }
+  bool operator==(const NativeArray& rhs) const {
+    return size() == rhs.size() &&
+        ArrayEq(begin(), size(), rhs.begin());
+  }
+
+ private:
+  enum {
+    kCheckTypeIsNotConstOrAReference = StaticAssertTypeEqHelper<
+        Element, GTEST_REMOVE_REFERENCE_AND_CONST_(Element)>::value,
+  };
+
+  // Initializes this object with a copy of the input.
+  void InitCopy(const Element* array, size_t a_size) {
+    Element* const copy = new Element[a_size];
+    CopyArray(array, a_size, copy);
+    array_ = copy;
+    size_ = a_size;
+    clone_ = &NativeArray::InitCopy;
+  }
+
+  // Initializes this object with a reference of the input.
+  void InitRef(const Element* array, size_t a_size) {
+    array_ = array;
+    size_ = a_size;
+    clone_ = &NativeArray::InitRef;
+  }
+
+  const Element* array_;
+  size_t size_;
+  void (NativeArray::*clone_)(const Element*, size_t);
+
+  GTEST_DISALLOW_ASSIGN_(NativeArray);
+};
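+
+// A usage sketch (the values are illustrative):
+//
+//   const int values[] = { 1, 2, 3 };
+//   NativeArray<int> view(values, 3, RelationToSourceReference());
+//   NativeArray<int> copy(values, 3, RelationToSourceCopy());
+//   // view.size() == 3, view == copy, and *view.begin() == 1.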
+
+}  // namespace internal
+}  // namespace testing
+
+#define GTEST_MESSAGE_AT_(file, line, message, result_type) \
+  ::testing::internal::AssertHelper(result_type, file, line, message) \
+    = ::testing::Message()
+
+#define GTEST_MESSAGE_(message, result_type) \
+  GTEST_MESSAGE_AT_(__FILE__, __LINE__, message, result_type)
+
+#define GTEST_FATAL_FAILURE_(message) \
+  return GTEST_MESSAGE_(message, ::testing::TestPartResult::kFatalFailure)
+
+#define GTEST_NONFATAL_FAILURE_(message) \
+  GTEST_MESSAGE_(message, ::testing::TestPartResult::kNonFatalFailure)
+
+#define GTEST_SUCCESS_(message) \
+  GTEST_MESSAGE_(message, ::testing::TestPartResult::kSuccess)
+
+// Suppresses MSVC warnings 4072 (unreachable code) for the code following
+// statement if it returns or throws (or doesn't return or throw in some
+// situations).
+#define GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement) \
+  if (::testing::internal::AlwaysTrue()) { statement; }
+
+#define GTEST_TEST_THROW_(statement, expected_exception, fail) \
+  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+  if (::testing::internal::ConstCharPtr gtest_msg = "") { \
+    bool gtest_caught_expected = false; \
+    try { \
+      GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+    } \
+    catch (expected_exception const&) { \
+      gtest_caught_expected = true; \
+    } \
+    catch (...) { \
+      gtest_msg.value = \
+          "Expected: " #statement " throws an exception of type " \
+          #expected_exception ".\n  Actual: it throws a different type."; \
+      goto GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__); \
+    } \
+    if (!gtest_caught_expected) { \
+      gtest_msg.value = \
+          "Expected: " #statement " throws an exception of type " \
+          #expected_exception ".\n  Actual: it throws nothing."; \
+      goto GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__); \
+    } \
+  } else \
+    GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__): \
+      fail(gtest_msg.value)
+
+#define GTEST_TEST_NO_THROW_(statement, fail) \
+  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+  if (::testing::internal::AlwaysTrue()) { \
+    try { \
+      GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+    } \
+    catch (...) { \
+      goto GTEST_CONCAT_TOKEN_(gtest_label_testnothrow_, __LINE__); \
+    } \
+  } else \
+    GTEST_CONCAT_TOKEN_(gtest_label_testnothrow_, __LINE__): \
+      fail("Expected: " #statement " doesn't throw an exception.\n" \
+           "  Actual: it throws.")
+
+#define GTEST_TEST_ANY_THROW_(statement, fail) \
+  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+  if (::testing::internal::AlwaysTrue()) { \
+    bool gtest_caught_any = false; \
+    try { \
+      GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+    } \
+    catch (...) { \
+      gtest_caught_any = true; \
+    } \
+    if (!gtest_caught_any) { \
+      goto GTEST_CONCAT_TOKEN_(gtest_label_testanythrow_, __LINE__); \
+    } \
+  } else \
+    GTEST_CONCAT_TOKEN_(gtest_label_testanythrow_, __LINE__): \
+      fail("Expected: " #statement " throws an exception.\n" \
+           "  Actual: it doesn't.")
+
+
+// Implements Boolean test assertions such as EXPECT_TRUE. expression can be
+// either a boolean expression or an AssertionResult. text is a textual
+// representation of expression as it was passed into the EXPECT_TRUE.
+#define GTEST_TEST_BOOLEAN_(expression, text, actual, expected, fail) \
+  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+  if (const ::testing::AssertionResult gtest_ar_ = \
+      ::testing::AssertionResult(expression)) \
+    ; \
+  else \
+    fail(::testing::internal::GetBoolAssertionFailureMessage(\
+        gtest_ar_, text, #actual, #expected).c_str())
+
+#define GTEST_TEST_NO_FATAL_FAILURE_(statement, fail) \
+  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+  if (::testing::internal::AlwaysTrue()) { \
+    ::testing::internal::HasNewFatalFailureHelper gtest_fatal_failure_checker; \
+    GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+    if (gtest_fatal_failure_checker.has_new_fatal_failure()) { \
+      goto GTEST_CONCAT_TOKEN_(gtest_label_testnofatal_, __LINE__); \
+    } \
+  } else \
+    GTEST_CONCAT_TOKEN_(gtest_label_testnofatal_, __LINE__): \
+      fail("Expected: " #statement " doesn't generate new fatal " \
+           "failures in the current thread.\n" \
+           "  Actual: it does.")
+
+// Expands to the name of the class that implements the given test.
+#define GTEST_TEST_CLASS_NAME_(test_case_name, test_name) \
+  test_case_name##_##test_name##_Test
+
+// Helper macro for defining tests.
+#define GTEST_TEST_(test_case_name, test_name, parent_class, parent_id)\
+class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) : public parent_class {\
+ public:\
+  GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {}\
+ private:\
+  virtual void TestBody();\
+  static ::testing::TestInfo* const test_info_ GTEST_ATTRIBUTE_UNUSED_;\
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(\
+      GTEST_TEST_CLASS_NAME_(test_case_name, test_name));\
+};\
+\
+::testing::TestInfo* const GTEST_TEST_CLASS_NAME_(test_case_name, test_name)\
+  ::test_info_ =\
+    ::testing::internal::MakeAndRegisterTestInfo(\
+        #test_case_name, #test_name, NULL, NULL, \
+        ::testing::internal::CodeLocation(__FILE__, __LINE__), \
+        (parent_id), \
+        parent_class::SetUpTestCase, \
+        parent_class::TearDownTestCase, \
+        new ::testing::internal::TestFactoryImpl<\
+            GTEST_TEST_CLASS_NAME_(test_case_name, test_name)>);\
+void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::TestBody()
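+
+// For example, the public TEST() macro in gtest.h is defined in terms of
+// GTEST_TEST_, roughly as (sketch):
+//
+//   #define TEST(test_case_name, test_name) \
+//     GTEST_TEST_(test_case_name, test_name, \
+//                 ::testing::Test, ::testing::internal::GetTestTypeId())
+//
+// so that TEST(FooTest, Bar) { ... } declares and registers the class
+// FooTest_Bar_Test and supplies its TestBody().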
+
+#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_
+
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file defines the public API for death tests.  It is
+// #included by gtest.h so a user doesn't need to include this
+// directly.
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_
+#define GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_
+
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: wan@google.com (Zhanyong Wan), eefacm@gmail.com (Sean Mcafee)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file defines internal utilities needed for implementing
+// death tests.  They are subject to change without notice.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_
+
+
+#include <stdio.h>
+
+namespace testing {
+namespace internal {
+
+GTEST_DECLARE_string_(internal_run_death_test);
+
+// Names of the flags (needed for parsing Google Test flags).
+const char kDeathTestStyleFlag[] = "death_test_style";
+const char kDeathTestUseFork[] = "death_test_use_fork";
+const char kInternalRunDeathTestFlag[] = "internal_run_death_test";
+
+#if GTEST_HAS_DEATH_TEST
+
+// DeathTest is a class that hides much of the complexity of the
+// GTEST_DEATH_TEST_ macro.  It is abstract; its static Create method
+// returns a concrete class that depends on the prevailing death test
+// style, as defined by the --gtest_death_test_style and/or
+// --gtest_internal_run_death_test flags.
+
+// In describing the results of death tests, these terms are used with
+// the corresponding definitions:
+//
+// exit status:  The integer exit information in the format specified
+//               by wait(2)
+// exit code:    The integer code passed to exit(3), _exit(2), or
+//               returned from main()
+class GTEST_API_ DeathTest {
+ public:
+  // Create returns false if there was an error determining the
+  // appropriate action to take for the current death test; for example,
+  // if the gtest_death_test_style flag is set to an invalid value.
+  // The LastMessage method will return a more detailed message in that
+  // case.  Otherwise, the DeathTest pointer pointed to by the "test"
+  // argument is set.  If the death test should be skipped, the pointer
+  // is set to NULL; otherwise, it is set to the address of a new concrete
+  // DeathTest object that controls the execution of the current test.
+  static bool Create(const char* statement, const RE* regex,
+                     const char* file, int line, DeathTest** test);
+  DeathTest();
+  virtual ~DeathTest() { }
+
+  // A helper class that aborts a death test when it's deleted.
+  class ReturnSentinel {
+   public:
+    explicit ReturnSentinel(DeathTest* test) : test_(test) { }
+    ~ReturnSentinel() { test_->Abort(TEST_ENCOUNTERED_RETURN_STATEMENT); }
+   private:
+    DeathTest* const test_;
+    GTEST_DISALLOW_COPY_AND_ASSIGN_(ReturnSentinel);
+  } GTEST_ATTRIBUTE_UNUSED_;
+
+  // An enumeration of possible roles that may be taken when a death
+  // test is encountered.  EXECUTE means that the death test logic should
+  // be executed immediately.  OVERSEE means that the program should prepare
+  // the appropriate environment for a child process to execute the death
+  // test, then wait for it to complete.
+  enum TestRole { OVERSEE_TEST, EXECUTE_TEST };
+
+  // An enumeration of the three reasons that a test might be aborted.
+  enum AbortReason {
+    TEST_ENCOUNTERED_RETURN_STATEMENT,
+    TEST_THREW_EXCEPTION,
+    TEST_DID_NOT_DIE
+  };
+
+  // Assumes one of the above roles.
+  virtual TestRole AssumeRole() = 0;
+
+  // Waits for the death test to finish and returns its status.
+  virtual int Wait() = 0;
+
+  // Returns true if the death test passed; that is, the test process
+  // exited during the test, its exit status matches a user-supplied
+  // predicate, and its stderr output matches a user-supplied regular
+  // expression.
+  // The user-supplied predicate may be a macro expression rather
+  // than a function pointer or functor, or else Wait and Passed could
+  // be combined.
+  virtual bool Passed(bool exit_status_ok) = 0;
+
+  // Signals that the death test did not die as expected.
+  virtual void Abort(AbortReason reason) = 0;
+
+  // Returns a human-readable outcome message regarding the outcome of
+  // the last death test.
+  static const char* LastMessage();
+
+  static void set_last_death_test_message(const std::string& message);
+
+ private:
+  // A string containing a description of the outcome of the last death test.
+  static std::string last_death_test_message_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(DeathTest);
+};
+
+// Factory interface for death tests.  May be mocked out for testing.
+class DeathTestFactory {
+ public:
+  virtual ~DeathTestFactory() { }
+  virtual bool Create(const char* statement, const RE* regex,
+                      const char* file, int line, DeathTest** test) = 0;
+};
+
+// A concrete DeathTestFactory implementation for normal use.
+class DefaultDeathTestFactory : public DeathTestFactory {
+ public:
+  virtual bool Create(const char* statement, const RE* regex,
+                      const char* file, int line, DeathTest** test);
+};
+
+// Returns true if exit_status describes a process that was terminated
+// by a signal, or exited normally with a nonzero exit code.
+GTEST_API_ bool ExitedUnsuccessfully(int exit_status);
+
+// Traps C++ exceptions escaping statement and reports them as test
+// failures. Note that trapping SEH exceptions is not implemented here.
+# if GTEST_HAS_EXCEPTIONS
+#  define GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, death_test) \
+  try { \
+    GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+  } catch (const ::std::exception& gtest_exception) { \
+    fprintf(\
+        stderr, \
+        "\n%s: Caught std::exception-derived exception escaping the " \
+        "death test statement. Exception message: %s\n", \
+        ::testing::internal::FormatFileLocation(__FILE__, __LINE__).c_str(), \
+        gtest_exception.what()); \
+    fflush(stderr); \
+    death_test->Abort(::testing::internal::DeathTest::TEST_THREW_EXCEPTION); \
+  } catch (...) { \
+    death_test->Abort(::testing::internal::DeathTest::TEST_THREW_EXCEPTION); \
+  }
+
+# else
+#  define GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, death_test) \
+  GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement)
+
+# endif
+
+// This macro is for implementing ASSERT_DEATH*, EXPECT_DEATH*,
+// ASSERT_EXIT*, and EXPECT_EXIT*.
+# define GTEST_DEATH_TEST_(statement, predicate, regex, fail) \
+  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+  if (::testing::internal::AlwaysTrue()) { \
+    const ::testing::internal::RE& gtest_regex = (regex); \
+    ::testing::internal::DeathTest* gtest_dt; \
+    if (!::testing::internal::DeathTest::Create(#statement, &gtest_regex, \
+        __FILE__, __LINE__, &gtest_dt)) { \
+      goto GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__); \
+    } \
+    if (gtest_dt != NULL) { \
+      ::testing::internal::scoped_ptr< ::testing::internal::DeathTest> \
+          gtest_dt_ptr(gtest_dt); \
+      switch (gtest_dt->AssumeRole()) { \
+        case ::testing::internal::DeathTest::OVERSEE_TEST: \
+          if (!gtest_dt->Passed(predicate(gtest_dt->Wait()))) { \
+            goto GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__); \
+          } \
+          break; \
+        case ::testing::internal::DeathTest::EXECUTE_TEST: { \
+          ::testing::internal::DeathTest::ReturnSentinel \
+              gtest_sentinel(gtest_dt); \
+          GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, gtest_dt); \
+          gtest_dt->Abort(::testing::internal::DeathTest::TEST_DID_NOT_DIE); \
+          break; \
+        } \
+        default: \
+          break; \
+      } \
+    } \
+  } else \
+    GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__): \
+      fail(::testing::internal::DeathTest::LastMessage())
+// The symbol "fail" here expands to something into which a message
+// can be streamed.
+
+// This macro is for implementing ASSERT/EXPECT_DEBUG_DEATH when compiled in
+// NDEBUG mode. In this case we need the statements to be executed, the regex is
+// ignored, and the macro must accept a streamed message even though the message
+// is never printed.
+# define GTEST_EXECUTE_STATEMENT_(statement, regex) \
+  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+  if (::testing::internal::AlwaysTrue()) { \
+     GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+  } else \
+    ::testing::Message()
+
+// A class representing the parsed contents of the
+// --gtest_internal_run_death_test flag, as it existed when
+// RUN_ALL_TESTS was called.
+class InternalRunDeathTestFlag {
+ public:
+  InternalRunDeathTestFlag(const std::string& a_file,
+                           int a_line,
+                           int an_index,
+                           int a_write_fd)
+      : file_(a_file), line_(a_line), index_(an_index),
+        write_fd_(a_write_fd) {}
+
+  ~InternalRunDeathTestFlag() {
+    if (write_fd_ >= 0)
+      posix::Close(write_fd_);
+  }
+
+  const std::string& file() const { return file_; }
+  int line() const { return line_; }
+  int index() const { return index_; }
+  int write_fd() const { return write_fd_; }
+
+ private:
+  std::string file_;
+  int line_;
+  int index_;
+  int write_fd_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(InternalRunDeathTestFlag);
+};
+
+// Returns a newly created InternalRunDeathTestFlag object with fields
+// initialized from the GTEST_FLAG(internal_run_death_test) flag if
+// the flag is specified; otherwise returns NULL.
+InternalRunDeathTestFlag* ParseInternalRunDeathTestFlag();
+
+#else  // GTEST_HAS_DEATH_TEST
+
+// This macro is used for implementing macros such as
+// EXPECT_DEATH_IF_SUPPORTED and ASSERT_DEATH_IF_SUPPORTED on systems where
+// death tests are not supported. Those macros must compile on such systems
+// iff EXPECT_DEATH and ASSERT_DEATH compile with the same parameters on
+// systems that support death tests. This allows one to write such a macro
+// on a system that does not support death tests and be sure that it will
+// compile on a death-test supporting system.
+//
+// Parameters:
+//   statement -  A statement that a macro such as EXPECT_DEATH would test
+//                for program termination. This macro has to make sure this
+//                statement is compiled but not executed, to ensure that
+//                EXPECT_DEATH_IF_SUPPORTED compiles with a certain
+//                parameter iff EXPECT_DEATH compiles with it.
+//   regex     -  A regex that a macro such as EXPECT_DEATH would use to test
+//                the output of statement.  This parameter has to be
+//                compiled but not evaluated by this macro, to ensure that
+//                this macro only accepts expressions that a macro such as
+//                EXPECT_DEATH would accept.
+//   terminator - Must be an empty statement for EXPECT_DEATH_IF_SUPPORTED
+//                and a return statement for ASSERT_DEATH_IF_SUPPORTED.
+//                This ensures that ASSERT_DEATH_IF_SUPPORTED will not
+//                compile inside functions where ASSERT_DEATH doesn't
+//                compile.
+//
+//  The branch that has an always false condition is used to ensure that
+//  statement and regex are compiled (and thus syntactically correct) but
+//  never executed. The unreachable code macro protects the terminator
+//  statement from generating an 'unreachable code' warning in case
+//  statement unconditionally returns or throws. The Message constructor at
+//  the end allows the syntax of streaming additional messages into the
+//  macro, for compile-time compatibility with EXPECT_DEATH/ASSERT_DEATH.
+# define GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, terminator) \
+    GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+    if (::testing::internal::AlwaysTrue()) { \
+      GTEST_LOG_(WARNING) \
+          << "Death tests are not supported on this platform.\n" \
+          << "Statement '" #statement "' cannot be verified."; \
+    } else if (::testing::internal::AlwaysFalse()) { \
+      ::testing::internal::RE::PartialMatch(".*", (regex)); \
+      GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+      terminator; \
+    } else \
+      ::testing::Message()
+
+#endif  // GTEST_HAS_DEATH_TEST
+
+}  // namespace internal
+}  // namespace testing
+
+#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_
+
+namespace testing {
+
+// This flag controls the style of death tests.  Valid values are "threadsafe",
+// meaning that the death test child process will re-execute the test binary
+// from the start, running only a single death test, or "fast",
+// meaning that the child process will execute the test logic immediately
+// after forking.
+GTEST_DECLARE_string_(death_test_style);
+
+#if GTEST_HAS_DEATH_TEST
+
+namespace internal {
+
+// Returns a Boolean value indicating whether the caller is currently
+// executing in the context of the death test child process.  Tools such as
+// Valgrind heap checkers may need this to modify their behavior in death
+// tests.  IMPORTANT: This is an internal utility.  Using it may break the
+// implementation of death tests.  User code MUST NOT use it.
+GTEST_API_ bool InDeathTestChild();
+
+}  // namespace internal
+
+// The following macros are useful for writing death tests.
+
+// Here's what happens when an ASSERT_DEATH* or EXPECT_DEATH* is
+// executed:
+//
+//   1. It generates a warning if there is more than one active
+//   thread.  This is because it's safe to fork() or clone() only
+//   when there is a single thread.
+//
+//   2. The parent process clone()s a sub-process and runs the death
+//   test in it; the sub-process exits with code 0 at the end of the
+//   death test, if it hasn't exited already.
+//
+//   3. The parent process waits for the sub-process to terminate.
+//
+//   4. The parent process checks the exit code and error message of
+//   the sub-process.
+//
+// Examples:
+//
+//   ASSERT_DEATH(server.SendMessage(56, "Hello"), "Invalid port number");
+//   for (int i = 0; i < 5; i++) {
+//     EXPECT_DEATH(server.ProcessRequest(i),
+//                  "Invalid request .* in ProcessRequest()")
+//                  << "Failed to die on request " << i;
+//   }
+//
+//   ASSERT_EXIT(server.ExitNow(), ::testing::ExitedWithCode(0), "Exiting");
+//
+//   bool KilledBySIGHUP(int exit_code) {
+//     return WIFSIGNALED(exit_code) && WTERMSIG(exit_code) == SIGHUP;
+//   }
+//
+//   ASSERT_EXIT(client.HangUpServer(), KilledBySIGHUP, "Hanging up!");
+//
+// On the regular expressions used in death tests:
+//
+//   On POSIX-compliant systems (*nix), we use the <regex.h> library,
+//   which uses the POSIX extended regex syntax.
+//
+//   On other platforms (e.g. Windows), we only support a simple regex
+//   syntax implemented as part of Google Test.  This limited
+//   implementation should be enough most of the time when writing
+//   death tests; though it lacks many features you can find in PCRE
+//   or POSIX extended regex syntax.  For example, we don't support
+//   union ("x|y"), grouping ("(xy)"), brackets ("[xy]"), and
+//   repetition count ("x{5,7}"), among others.
+//
+//   Below is the syntax that we do support.  We chose it to be a
+//   subset of both PCRE and POSIX extended regex, so it's easy to
+//   learn wherever you come from.  In the following: 'A' denotes a
+//   literal character, period (.), or a single \\ escape sequence;
+//   'x' and 'y' denote regular expressions; 'm' and 'n' are for
+//   natural numbers.
+//
+//     c     matches any literal character c
+//     \\d   matches any decimal digit
+//     \\D   matches any character that's not a decimal digit
+//     \\f   matches \f
+//     \\n   matches \n
+//     \\r   matches \r
+//     \\s   matches any ASCII whitespace, including \n
+//     \\S   matches any character that's not a whitespace
+//     \\t   matches \t
+//     \\v   matches \v
+//     \\w   matches any letter, _, or decimal digit
+//     \\W   matches any character that \\w doesn't match
+//     \\c   matches any literal character c, which must be a punctuation
+//     .     matches any single character except \n
+//     A?    matches 0 or 1 occurrences of A
+//     A*    matches 0 or many occurrences of A
+//     A+    matches 1 or many occurrences of A
+//     ^     matches the beginning of a string (not that of each line)
+//     $     matches the end of a string (not that of each line)
+//     xy    matches x followed by y
+//
+//   If you accidentally use PCRE or POSIX extended regex features
+//   not implemented by us, you will get a run-time failure.  In that
+//   case, please try to rewrite your regular expression within the
+//   above syntax.
+//
+//   This implementation is *not* meant to be as highly tuned or robust
+//   as a compiled regex library, but should perform well enough for a
+//   death test, which already incurs significant overhead by launching
+//   a child process.
+//
+// Known caveats:
+//
+//   A "threadsafe" style death test obtains the path to the test
+//   program from argv[0] and re-executes it in the sub-process.  For
+//   simplicity, the current implementation doesn't search the PATH
+//   when launching the sub-process.  This means that the user must
+//   invoke the test program via a path that contains at least one
+//   path separator (e.g. path/to/foo_test and
+//   /absolute/path/to/bar_test are fine, but foo_test is not).  This
+//   is rarely a problem as people usually don't put the test binary
+//   directory in PATH.
+//
+// TODO(wan@google.com): make thread-safe death tests search the PATH.
+
+// Asserts that a given statement causes the program to exit, with an
+// integer exit status that satisfies predicate, and emitting error output
+// that matches regex.
+# define ASSERT_EXIT(statement, predicate, regex) \
+    GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_FATAL_FAILURE_)
+
+// Like ASSERT_EXIT, but continues on to successive tests in the
+// test case, if any:
+# define EXPECT_EXIT(statement, predicate, regex) \
+    GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_NONFATAL_FAILURE_)
+
+// Asserts that a given statement causes the program to exit, either by
+// explicitly exiting with a nonzero exit code or being killed by a
+// signal, and emitting error output that matches regex.
+# define ASSERT_DEATH(statement, regex) \
+    ASSERT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex)
+
+// Like ASSERT_DEATH, but continues on to successive tests in the
+// test case, if any:
+# define EXPECT_DEATH(statement, regex) \
+    EXPECT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex)
+
+// Two predicate classes that can be used in {ASSERT,EXPECT}_EXIT*:
+
+// Tests that an exit code describes a normal exit with a given exit code.
+class GTEST_API_ ExitedWithCode {
+ public:
+  explicit ExitedWithCode(int exit_code);
+  bool operator()(int exit_status) const;
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ExitedWithCode& other);
+
+  const int exit_code_;
+};
+
+# if !GTEST_OS_WINDOWS
+// Tests that an exit code describes an exit due to termination by a
+// given signal.
+class GTEST_API_ KilledBySignal {
+ public:
+  explicit KilledBySignal(int signum);
+  bool operator()(int exit_status) const;
+ private:
+  const int signum_;
+};
+# endif  // !GTEST_OS_WINDOWS
+
+// EXPECT_DEBUG_DEATH asserts that the given statements die in debug mode.
+// The death testing framework causes this to have interesting semantics,
+// since the side effects of the call are only visible in opt mode, and not
+// in debug mode.
+//
+// In practice, this can be used to test functions that utilize the
+// LOG(DFATAL) macro using the following style:
+//
+// int DieInDebugOr12(int* sideeffect) {
+//   if (sideeffect) {
+//     *sideeffect = 12;
+//   }
+//   LOG(DFATAL) << "death";
+//   return 12;
+// }
+//
+// TEST(TestCase, TestDieOr12WorksInDbgAndOpt) {
+//   int sideeffect = 0;
+//   // Only asserts in dbg.
+//   EXPECT_DEBUG_DEATH(DieInDebugOr12(&sideeffect), "death");
+//
+// #ifdef NDEBUG
+//   // In opt mode, the side effect is visible.
+//   EXPECT_EQ(12, sideeffect);
+// #else
+//   // In dbg mode, there is no visible side effect.
+//   EXPECT_EQ(0, sideeffect);
+// #endif
+// }
+//
+// This will assert that DieInDebugOr12() crashes in debug
+// mode, usually due to a DCHECK or LOG(DFATAL), but returns the
+// appropriate fallback value (12 in this case) in opt mode. If you
+// need to test that a function has appropriate side-effects in opt
+// mode, include assertions against the side-effects.  A general
+// pattern for this is:
+//
+// EXPECT_DEBUG_DEATH({
+//   // Side-effects here will have an effect after this statement in
+//   // opt mode, but none in debug mode.
+//   EXPECT_EQ(12, DieInDebugOr12(&sideeffect));
+// }, "death");
+//
+# ifdef NDEBUG
+
+#  define EXPECT_DEBUG_DEATH(statement, regex) \
+  GTEST_EXECUTE_STATEMENT_(statement, regex)
+
+#  define ASSERT_DEBUG_DEATH(statement, regex) \
+  GTEST_EXECUTE_STATEMENT_(statement, regex)
+
+# else
+
+#  define EXPECT_DEBUG_DEATH(statement, regex) \
+  EXPECT_DEATH(statement, regex)
+
+#  define ASSERT_DEBUG_DEATH(statement, regex) \
+  ASSERT_DEATH(statement, regex)
+
+# endif  // NDEBUG for EXPECT_DEBUG_DEATH
+#endif  // GTEST_HAS_DEATH_TEST
+
+// EXPECT_DEATH_IF_SUPPORTED(statement, regex) and
+// ASSERT_DEATH_IF_SUPPORTED(statement, regex) expand to real death tests if
+// death tests are supported; otherwise they just issue a warning.  This is
+// useful when you are combining death test assertions with normal test
+// assertions in one test.
+#if GTEST_HAS_DEATH_TEST
+# define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \
+    EXPECT_DEATH(statement, regex)
+# define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \
+    ASSERT_DEATH(statement, regex)
+#else
+# define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \
+    GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, )
+# define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \
+    GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, return)
+#endif
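+
+// For example (an illustrative sketch; CheckPositive() is hypothetical),
+// the following runs as a real death test where supported and degrades to
+// a warning elsewhere:
+//
+//   EXPECT_DEATH_IF_SUPPORTED(CheckPositive(-1), "must be positive");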
+
+}  // namespace testing
+
+#endif  // GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_
+// This file was GENERATED by command:
+//     pump.py gtest-param-test.h.pump
+// DO NOT EDIT BY HAND!!!
+
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: vladl@google.com (Vlad Losev)
+//
+// Macros and functions for implementing parameterized tests
+// in Google C++ Testing Framework (Google Test)
+//
+// This file is generated by a SCRIPT.  DO NOT EDIT BY HAND!
+//
+#ifndef GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
+#define GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
+
+
+// Value-parameterized tests allow you to test your code with different
+// parameters without writing multiple copies of the same test.
+//
+// Here is how you use value-parameterized tests:
+
+#if 0
+
+// To write value-parameterized tests, first you should define a fixture
+// class. It is usually derived from testing::TestWithParam<T> (see below for
+// another inheritance scheme that's sometimes useful in more complicated
+// class hierarchies), where T is the type of your parameter values.
+// TestWithParam<T> is itself derived from testing::Test. T can be any
+// copyable type. If it's a raw pointer, you are responsible for managing the
+// lifespan of the pointed values.
+
+class FooTest : public ::testing::TestWithParam<const char*> {
+  // You can implement all the usual class fixture members here.
+};
+
+// Then, use the TEST_P macro to define as many parameterized tests
+// for this fixture as you want. The _P suffix is for "parameterized"
+// or "pattern", whichever you prefer to think.
+
+TEST_P(FooTest, DoesBlah) {
+  // Inside a test, access the test parameter with the GetParam() method
+  // of the TestWithParam<T> class:
+  EXPECT_TRUE(foo.Blah(GetParam()));
+  ...
+}
+
+TEST_P(FooTest, HasBlahBlah) {
+  ...
+}
+
+// Finally, you can use INSTANTIATE_TEST_CASE_P to instantiate the test
+// case with any set of parameters you want. Google Test defines a number
+// of functions for generating test parameters. They return what we call
+// (surprise!) parameter generators. Here is a summary of them, all of
+// which live in the testing namespace:
+//
+//
+//  Range(begin, end [, step]) - Yields values {begin, begin+step,
+//                               begin+step+step, ...}. The values do not
+//                               include end. step defaults to 1.
+//  Values(v1, v2, ..., vN)    - Yields values {v1, v2, ..., vN}.
+//  ValuesIn(container)        - Yields values from a C-style array, an STL
+//  ValuesIn(begin,end)          container, or an iterator range [begin, end).
+//  Bool()                     - Yields sequence {false, true}.
+//  Combine(g1, g2, ..., gN)   - Yields all combinations (the Cartesian product
+//                               for the math savvy) of the values generated
+//                               by the N generators.
+//
+// For more details, see comments at the definitions of these functions below
+// in this file.
+//
+// The following statement will instantiate tests from the FooTest test case
+// each with parameter values "meeny", "miny", and "moe".
+
+INSTANTIATE_TEST_CASE_P(InstantiationName,
+                        FooTest,
+                        Values("meeny", "miny", "moe"));
+
+// To distinguish different instances of the pattern (yes, you
+// can instantiate it more than once), the first argument to the
+// INSTANTIATE_TEST_CASE_P macro is a prefix that will be added to the
+// actual test case name. Remember to pick unique prefixes for different
+// instantiations. The tests from the instantiation above will have
+// these names:
+//
+//    * InstantiationName/FooTest.DoesBlah/0 for "meeny"
+//    * InstantiationName/FooTest.DoesBlah/1 for "miny"
+//    * InstantiationName/FooTest.DoesBlah/2 for "moe"
+//    * InstantiationName/FooTest.HasBlahBlah/0 for "meeny"
+//    * InstantiationName/FooTest.HasBlahBlah/1 for "miny"
+//    * InstantiationName/FooTest.HasBlahBlah/2 for "moe"
+//
+// You can use these names in --gtest_filter.
+//
+// This statement will instantiate all tests from FooTest again, each
+// with parameter values "cat" and "dog":
+
+const char* pets[] = {"cat", "dog"};
+INSTANTIATE_TEST_CASE_P(AnotherInstantiationName, FooTest, ValuesIn(pets));
+
+// The tests from the instantiation above will have these names:
+//
+//    * AnotherInstantiationName/FooTest.DoesBlah/0 for "cat"
+//    * AnotherInstantiationName/FooTest.DoesBlah/1 for "dog"
+//    * AnotherInstantiationName/FooTest.HasBlahBlah/0 for "cat"
+//    * AnotherInstantiationName/FooTest.HasBlahBlah/1 for "dog"
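+//
+// As a further (hypothetical) illustration of the other generators, a
+// fixture parameterized over int, say
+//
+//   class BarTest : public ::testing::TestWithParam<int> {};
+//
+// could be instantiated over a numeric range:
+//
+//   INSTANTIATE_TEST_CASE_P(Small, BarTest, Range(1, 4));  // yields 1, 2, 3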
+//
+// Please note that INSTANTIATE_TEST_CASE_P will instantiate all tests
+// in the given test case, whether their definitions come before or
+// AFTER the INSTANTIATE_TEST_CASE_P statement.
+//
+// Please also note that generator expressions (including parameters to the
+// generators) are evaluated in InitGoogleTest(), after main() has started.
+// This allows the user, on one hand, to adjust generator parameters in order
+// to dynamically determine a set of tests to run and, on the other hand, to
+// inspect the generated tests with the Google Test reflection API before
+// RUN_ALL_TESTS() is executed.
+//
+// You can see samples/sample7_unittest.cc and samples/sample8_unittest.cc
+// for more examples.
+//
+// In the future, we plan to publish the API for defining new parameter
+// generators. But for now this interface remains part of the internal
+// implementation and is subject to change.
+//
+//
+// A parameterized test fixture must be derived from testing::Test and from
+// testing::WithParamInterface<T>, where T is the type of the parameter
+// values. Inheriting from TestWithParam<T> satisfies that requirement because
+// TestWithParam<T> inherits from both Test and WithParamInterface. In more
+// complicated hierarchies, however, it is occasionally useful to inherit
+// separately from Test and WithParamInterface. For example:
+
+class BaseTest : public ::testing::Test {
+  // You can inherit all the usual members for a non-parameterized test
+  // fixture here.
+};
+
+class DerivedTest : public BaseTest, public ::testing::WithParamInterface<int> {
+  // The usual test fixture members go here too.
+};
+
+TEST_F(BaseTest, HasFoo) {
+  // This is an ordinary non-parameterized test.
+}
+
+TEST_P(DerivedTest, DoesBlah) {
+  // GetParam works just the same here as if you inherit from TestWithParam.
+  EXPECT_TRUE(foo.Blah(GetParam()));
+}
+
+#endif  // 0
+
+
+#if !GTEST_OS_SYMBIAN
+# include <utility>
+#endif
+
+// scripts/fuse_gtest.py depends on gtest's own header being #included
+// *unconditionally*.  Therefore these #includes cannot be moved
+// inside #if GTEST_HAS_PARAM_TEST.
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vladl@google.com (Vlad Losev)
+
+// Type and function utilities for implementing parameterized tests.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_
+
+#include <ctype.h>
+
+#include <iterator>
+#include <set>
+#include <utility>
+#include <vector>
+
+// scripts/fuse_gtest.py depends on gtest's own header being #included
+// *unconditionally*.  Therefore these #includes cannot be moved
+// inside #if GTEST_HAS_PARAM_TEST.
+// Copyright 2003 Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: Dan Egnor (egnor@google.com)
+//
+// A "smart" pointer type with reference tracking.  Every pointer to a
+// particular object is kept on a circular linked list.  When the last pointer
+// to an object is destroyed or reassigned, the object is deleted.
+//
+// Used properly, this deletes the object when the last reference goes away.
+// There are several caveats:
+// - Like all reference counting schemes, cycles lead to leaks.
+// - Each smart pointer is actually two pointers (twice the size of a raw
+//   pointer).
+// - Every time a pointer is assigned, the entire list of pointers to that
+//   object is traversed.  This class is therefore NOT SUITABLE when there
+//   will often be more than two or three pointers to a particular object.
+// - References are only tracked as long as linked_ptr<> objects are copied.
+//   If a linked_ptr<> is converted to a raw pointer and back, BAD THINGS
+//   will happen (double deletion).
+//
+// A good use of this class is storing object references in STL containers.
+// You can safely put linked_ptr<> in a vector<>.
+// Other uses may not be as good.
+//
+// Note: If you use an incomplete type with linked_ptr<>, the class
+// *containing* linked_ptr<> must have a constructor and destructor (even
+// if they do nothing!).
+//
+// Bill Gibbons suggested we use something like this.
+//
+// Thread Safety:
+//   Unlike other linked_ptr implementations, in this implementation
+//   a linked_ptr object is thread-safe in the sense that:
+//     - it's safe to copy linked_ptr objects concurrently,
+//     - it's safe to copy *from* a linked_ptr and read its underlying
+//       raw pointer (e.g. via get()) concurrently, and
+//     - it's safe to write to two linked_ptrs that point to the same
+//       shared object concurrently.
+// TODO(wan@google.com): rename this to safe_linked_ptr to avoid
+// confusion with normal linked_ptr.
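+//
+// A minimal usage sketch (Foo is a hypothetical type):
+//
+//   std::vector<linked_ptr<Foo> > foos;
+//   foos.push_back(make_linked_ptr(new Foo));
+//   foos.push_back(foos[0]);  // Both elements now share ownership.
+//   // The Foo is deleted when the last linked_ptr referring to it goes away.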
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_
+
+#include <stdlib.h>
+#include <assert.h>
+
+
+namespace testing {
+namespace internal {
+
+// Protects copying of all linked_ptr objects.
+GTEST_API_ GTEST_DECLARE_STATIC_MUTEX_(g_linked_ptr_mutex);
+
+// This is used internally by all instances of linked_ptr<>.  It needs to be
+// a non-template class because different types of linked_ptr<> can refer to
+// the same object (linked_ptr<Superclass>(obj) vs linked_ptr<Subclass>(obj)).
+// Different types of linked_ptr must therefore be able to participate in the
+// same circular linked list, which is why we need a single, non-template
+// class type here.
+//
+// DO NOT USE THIS CLASS DIRECTLY YOURSELF.  Use linked_ptr<T>.
+class linked_ptr_internal {
+ public:
+  // Create a new circle that includes only this instance.
+  void join_new() {
+    next_ = this;
+  }
+
+  // Many linked_ptr operations may change p.link_ for some linked_ptr
+  // variable p in the same circle as this object.  Therefore we need
+  // to prevent two such operations from occurring concurrently.
+  //
+  // Note that different types of linked_ptr objects can coexist in a
+  // circle (e.g. linked_ptr<Base>, linked_ptr<Derived1>, and
+  // linked_ptr<Derived2>).  Therefore we must use a single mutex to
+  // protect all linked_ptr objects.  This can create serious
+  // contention in production code, but is acceptable in a testing
+  // framework.
+
+  // Join an existing circle.
+  void join(linked_ptr_internal const* ptr)
+      GTEST_LOCK_EXCLUDED_(g_linked_ptr_mutex) {
+    MutexLock lock(&g_linked_ptr_mutex);
+
+    linked_ptr_internal const* p = ptr;
+    while (p->next_ != ptr) {
+      assert(p->next_ != this &&
+             "Trying to join() a linked ring we are already in. "
+             "Is GMock thread safety enabled?");
+      p = p->next_;
+    }
+    p->next_ = this;
+    next_ = ptr;
+  }
+
+  // Leave whatever circle we're part of.  Returns true if we were the
+  // last member of the circle.  Once this is done, you can join() another.
+  bool depart()
+      GTEST_LOCK_EXCLUDED_(g_linked_ptr_mutex) {
+    MutexLock lock(&g_linked_ptr_mutex);
+
+    if (next_ == this) return true;
+    linked_ptr_internal const* p = next_;
+    while (p->next_ != this) {
+      assert(p->next_ != next_ &&
+             "Trying to depart() a linked ring we are not in. "
+             "Is GMock thread safety enabled?");
+      p = p->next_;
+    }
+    p->next_ = next_;
+    return false;
+  }
+
+ private:
+  mutable linked_ptr_internal const* next_;
+};
+
+template <typename T>
+class linked_ptr {
+ public:
+  typedef T element_type;
+
+  // Take over ownership of a raw pointer.  This should happen as soon as
+  // possible after the object is created.
+  explicit linked_ptr(T* ptr = NULL) { capture(ptr); }
+  ~linked_ptr() { depart(); }
+
+  // Copy an existing linked_ptr<>, adding ourselves to the list of references.
+  template <typename U> linked_ptr(linked_ptr<U> const& ptr) { copy(&ptr); }
+  linked_ptr(linked_ptr const& ptr) {  // NOLINT
+    assert(&ptr != this);
+    copy(&ptr);
+  }
+
+  // Assignment releases the old value and acquires the new.
+  template <typename U> linked_ptr& operator=(linked_ptr<U> const& ptr) {
+    depart();
+    copy(&ptr);
+    return *this;
+  }
+
+  linked_ptr& operator=(linked_ptr const& ptr) {
+    if (&ptr != this) {
+      depart();
+      copy(&ptr);
+    }
+    return *this;
+  }
+
+  // Smart pointer members.
+  void reset(T* ptr = NULL) {
+    depart();
+    capture(ptr);
+  }
+  T* get() const { return value_; }
+  T* operator->() const { return value_; }
+  T& operator*() const { return *value_; }
+
+  bool operator==(T* p) const { return value_ == p; }
+  bool operator!=(T* p) const { return value_ != p; }
+  template <typename U>
+  bool operator==(linked_ptr<U> const& ptr) const {
+    return value_ == ptr.get();
+  }
+  template <typename U>
+  bool operator!=(linked_ptr<U> const& ptr) const {
+    return value_ != ptr.get();
+  }
+
+ private:
+  template <typename U>
+  friend class linked_ptr;
+
+  T* value_;
+  linked_ptr_internal link_;
+
+  void depart() {
+    if (link_.depart()) delete value_;
+  }
+
+  void capture(T* ptr) {
+    value_ = ptr;
+    link_.join_new();
+  }
+
+  template <typename U> void copy(linked_ptr<U> const* ptr) {
+    value_ = ptr->get();
+    if (value_)
+      link_.join(&ptr->link_);
+    else
+      link_.join_new();
+  }
+};
+
+template<typename T> inline
+bool operator==(T* ptr, const linked_ptr<T>& x) {
+  return ptr == x.get();
+}
+
+template<typename T> inline
+bool operator!=(T* ptr, const linked_ptr<T>& x) {
+  return ptr != x.get();
+}
+
+// A function to convert T* into linked_ptr<T>
+// Doing e.g. make_linked_ptr(new FooBarBaz<type>(arg)) is a shorter notation
+// for linked_ptr<FooBarBaz<type> >(new FooBarBaz<type>(arg))
+template <typename T>
+linked_ptr<T> make_linked_ptr(T* ptr) {
+  return linked_ptr<T>(ptr);
+}
+
+}  // namespace internal
+}  // namespace testing
+
+#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Test - The Google C++ Testing Framework
+//
+// This file implements a universal value printer that can print a
+// value of any type T:
+//
+//   void ::testing::internal::UniversalPrinter<T>::Print(value, ostream_ptr);
+//
+// A user can teach this function how to print a class type T by
+// defining either operator<<() or PrintTo() in the namespace that
+// defines T.  More specifically, the FIRST defined function in the
+// following list will be used (assuming T is defined in namespace
+// foo):
+//
+//   1. foo::PrintTo(const T&, ostream*)
+//   2. operator<<(ostream&, const T&) defined in either foo or the
+//      global namespace.
+//
+// If none of the above is defined, it will print the debug string of
+// the value if it is a protocol buffer, or print the raw bytes in the
+// value otherwise.
+//
+// To aid debugging: when T is a reference type, the address of the
+// value is also printed; when T is a (const) char pointer, both the
+// pointer value and the NUL-terminated string it points to are
+// printed.
+//
+// We also provide some convenient wrappers:
+//
+//   // Prints a value to a string.  For a (const or not) char
+//   // pointer, the NUL-terminated string (but not the pointer) is
+//   // printed.
+//   std::string ::testing::PrintToString(const T& value);
+//
+//   // Prints a value tersely: for a reference type, the referenced
+//   // value (but not the address) is printed; for a (const or not) char
+//   // pointer, the NUL-terminated string (but not the pointer) is
+//   // printed.
+//   void ::testing::internal::UniversalTersePrint(const T& value, ostream*);
+//
+//   // Prints value using the type inferred by the compiler.  The difference
+//   // from UniversalTersePrint() is that this function prints both the
+//   // pointer and the NUL-terminated string for a (const or not) char pointer.
+//   void ::testing::internal::UniversalPrint(const T& value, ostream*);
+//
+//   // Prints the fields of a tuple tersely to a string vector, one
+//   // element for each field. Tuple support must be enabled in
+//   // gtest-port.h.
+//   std::vector<string> UniversalTersePrintTupleFieldsToStrings(
+//       const Tuple& value);
+//
+// Known limitation:
+//
+// The print primitives print the elements of an STL-style container
+// using the compiler-inferred type of *iter where iter is a
+// const_iterator of the container.  When const_iterator is an input
+// iterator but not a forward iterator, this inferred type may not
+// match value_type, and the print output may be incorrect.  In
+// practice, this is rarely a problem as for most containers
+// const_iterator is a forward iterator.  We'll fix this if there's an
+// actual need for it.  Note that this fix cannot rely on value_type
+// being defined as many user-defined container types don't have
+// value_type.
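+//
+// For example (an illustrative sketch; Bar and DebugName() are
+// hypothetical), teaching Google Test to print Bar values amounts to
+// adding, in Bar's namespace:
+//
+//   void PrintTo(const Bar& bar, ::std::ostream* os) {
+//     *os << "Bar(" << bar.DebugName() << ")";
+//   }
+//
+// After that, ::testing::PrintToString() and assertion failure messages
+// involving Bar values use this format.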
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_
+#define GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_
+
+#include <ostream>  // NOLINT
+#include <sstream>
+#include <string>
+#include <utility>
+#include <vector>
+
+#if GTEST_HAS_STD_TUPLE_
+# include <tuple>
+#endif
+
+namespace testing {
+
+// Definitions in the 'internal' and 'internal2' name spaces are
+// subject to change without notice.  DO NOT USE THEM IN USER CODE!
+namespace internal2 {
+
+// Prints the given number of bytes in the given object to the given
+// ostream.
+GTEST_API_ void PrintBytesInObjectTo(const unsigned char* obj_bytes,
+                                     size_t count,
+                                     ::std::ostream* os);
+
+// For selecting which printer to use when a given type has neither <<
+// nor PrintTo().
+enum TypeKind {
+  kProtobuf,              // a protobuf type
+  kConvertibleToInteger,  // a type implicitly convertible to BiggestInt
+                          // (e.g. a named or unnamed enum type)
+  kOtherType              // anything else
+};
+
+// TypeWithoutFormatter<T, kTypeKind>::PrintValue(value, os) is called
+// by the universal printer to print a value of type T when neither
+// operator<< nor PrintTo() is defined for T, where kTypeKind is the
+// "kind" of T as defined by enum TypeKind.
+template <typename T, TypeKind kTypeKind>
+class TypeWithoutFormatter {
+ public:
+  // This default version is called when kTypeKind is kOtherType.
+  static void PrintValue(const T& value, ::std::ostream* os) {
+    PrintBytesInObjectTo(reinterpret_cast<const unsigned char*>(&value),
+                         sizeof(value), os);
+  }
+};
+
+// We print a protobuf using its ShortDebugString() when the string
+// doesn't exceed this many characters; otherwise we print it using
+// DebugString() for better readability.
+const size_t kProtobufOneLinerMaxLength = 50;
+
+template <typename T>
+class TypeWithoutFormatter<T, kProtobuf> {
+ public:
+  static void PrintValue(const T& value, ::std::ostream* os) {
+    const ::testing::internal::string short_str = value.ShortDebugString();
+    const ::testing::internal::string pretty_str =
+        short_str.length() <= kProtobufOneLinerMaxLength ?
+        short_str : ("\n" + value.DebugString());
+    *os << ("<" + pretty_str + ">");
+  }
+};
+
+template <typename T>
+class TypeWithoutFormatter<T, kConvertibleToInteger> {
+ public:
+  // Since T has no << operator or PrintTo() but can be implicitly
+  // converted to BiggestInt, we print it as a BiggestInt.
+  //
+  // Most likely T is an enum type (either named or unnamed), in which
+  // case printing it as an integer is the desired behavior.  In case
+  // T is not an enum, printing it as an integer is the best we can do
+  // given that it has no user-defined printer.
+  static void PrintValue(const T& value, ::std::ostream* os) {
+    const internal::BiggestInt kBigInt = value;
+    *os << kBigInt;
+  }
+};
+
+// Prints the given value to the given ostream.  If the value is a
+// protocol message, its debug string is printed; if it's an enum or
+// of a type implicitly convertible to BiggestInt, it's printed as an
+// integer; otherwise the bytes in the value are printed.  This is
+// what UniversalPrinter<T>::Print() does when it knows nothing about
+// type T and T has neither << operator nor PrintTo().
+//
+// A user can override this behavior for a class type Foo by defining
+// a << operator in the namespace where Foo is defined.
+//
+// We put this operator in namespace 'internal2' instead of 'internal'
+// to simplify the implementation, as much code in 'internal' needs to
+// use << on STL types, which would conflict with our own << were it
+// defined in 'internal'.
+//
+// Note that this operator<< takes a generic std::basic_ostream<Char,
+// CharTraits> type instead of the more restricted std::ostream.  If
+// we define it to take an std::ostream instead, we'll get an
+// "ambiguous overloads" compiler error when trying to print a type
+// Foo that supports streaming to std::basic_ostream<Char,
+// CharTraits>, as the compiler cannot tell whether
+// operator<<(std::ostream&, const T&) or
+// operator<<(std::basic_ostream<Char, CharTraits>&, const Foo&) is more
+// specific.
+template <typename Char, typename CharTraits, typename T>
+::std::basic_ostream<Char, CharTraits>& operator<<(
+    ::std::basic_ostream<Char, CharTraits>& os, const T& x) {
+  TypeWithoutFormatter<T,
+      (internal::IsAProtocolMessage<T>::value ? kProtobuf :
+       internal::ImplicitlyConvertible<const T&, internal::BiggestInt>::value ?
+       kConvertibleToInteger : kOtherType)>::PrintValue(x, &os);
+  return os;
+}
+
+}  // namespace internal2
+}  // namespace testing
+
+// This namespace MUST NOT BE NESTED IN ::testing, or the name look-up
+// magic needed for implementing UniversalPrinter won't work.
+namespace testing_internal {
+
+// Used to print a value that is not an STL-style container when the
+// user doesn't define PrintTo() for it.
+template <typename T>
+void DefaultPrintNonContainerTo(const T& value, ::std::ostream* os) {
+  // With the following statement, during unqualified name lookup,
+  // testing::internal2::operator<< appears as if it was declared in
+  // the nearest enclosing namespace that contains both
+  // ::testing_internal and ::testing::internal2, i.e. the global
+  // namespace.  For more details, refer to the C++ Standard section
+  // 7.3.4-1 [namespace.udir].  This allows us to fall back onto
+  // testing::internal2::operator<< in case T doesn't come with a <<
+  // operator.
+  //
+  // We cannot write 'using ::testing::internal2::operator<<;', which
+  // gcc 3.3 fails to compile due to a compiler bug.
+  using namespace ::testing::internal2;  // NOLINT
+
+  // Assuming T is defined in namespace foo, in the next statement,
+  // the compiler will consider all of:
+  //
+  //   1. foo::operator<< (thanks to Koenig look-up),
+  //   2. ::operator<< (as the current namespace is enclosed in ::),
+  //   3. testing::internal2::operator<< (thanks to the using statement above).
+  //
+  // The operator<< whose type matches T best will be picked.
+  //
+  // We deliberately allow #2 to be a candidate, as sometimes it's
+//   impossible to define #1 (e.g. when foo is ::std, defining
+//   anything in it is undefined behavior unless you are a compiler
+//   vendor).
+  *os << value;
+}
+
+}  // namespace testing_internal
+
+namespace testing {
+namespace internal {
+
+// FormatForComparison<ToPrint, OtherOperand>::Format(value) formats a
+// value of type ToPrint that is an operand of a comparison assertion
+// (e.g. ASSERT_EQ).  OtherOperand is the type of the other operand in
+// the comparison, and is used to help determine the best way to
+// format the value.  In particular, when the value is a C string
+// (char pointer) and the other operand is an STL string object, we
+// want to format the C string as a string, since we know it is
+// compared by value with the string object.  If the value is a char
+// pointer but the other operand is not an STL string object, we don't
+// know whether the pointer is supposed to point to a NUL-terminated
+// string, and thus want to print it as a pointer to be safe.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+
+// The default case.
+template <typename ToPrint, typename OtherOperand>
+class FormatForComparison {
+ public:
+  static ::std::string Format(const ToPrint& value) {
+    return ::testing::PrintToString(value);
+  }
+};
+
+// Array.
+template <typename ToPrint, size_t N, typename OtherOperand>
+class FormatForComparison<ToPrint[N], OtherOperand> {
+ public:
+  static ::std::string Format(const ToPrint* value) {
+    return FormatForComparison<const ToPrint*, OtherOperand>::Format(value);
+  }
+};
+
+// By default, print C string as pointers to be safe, as we don't know
+// whether they actually point to a NUL-terminated string.
+
+#define GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(CharType)                \
+  template <typename OtherOperand>                                      \
+  class FormatForComparison<CharType*, OtherOperand> {                  \
+   public:                                                              \
+    static ::std::string Format(CharType* value) {                      \
+      return ::testing::PrintToString(static_cast<const void*>(value)); \
+    }                                                                   \
+  }
+
+GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(char);
+GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(const char);
+GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(wchar_t);
+GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(const wchar_t);
+
+#undef GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_
+
+// If a C string is compared with an STL string object, we know it's meant
+// to point to a NUL-terminated string, and thus can print it as a string.
+
+#define GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(CharType, OtherStringType) \
+  template <>                                                           \
+  class FormatForComparison<CharType*, OtherStringType> {               \
+   public:                                                              \
+    static ::std::string Format(CharType* value) {                      \
+      return ::testing::PrintToString(value);                           \
+    }                                                                   \
+  }
+
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(char, ::std::string);
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const char, ::std::string);
+
+#if GTEST_HAS_GLOBAL_STRING
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(char, ::string);
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const char, ::string);
+#endif
+
+#if GTEST_HAS_GLOBAL_WSTRING
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(wchar_t, ::wstring);
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const wchar_t, ::wstring);
+#endif
+
+#if GTEST_HAS_STD_WSTRING
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(wchar_t, ::std::wstring);
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const wchar_t, ::std::wstring);
+#endif
+
+#undef GTEST_IMPL_FORMAT_C_STRING_AS_STRING_
+
+// Formats a comparison assertion (e.g. ASSERT_EQ, EXPECT_LT, etc.)
+// operand to be used in a failure message.  The type (but not value)
+// of the other operand may affect the format.  This allows us to
+// print a char* as a raw pointer when it is compared against another
+// char* or void*, and print it as a C string when it is compared
+// against an std::string object, for example.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+template <typename T1, typename T2>
+std::string FormatForComparisonFailureMessage(
+    const T1& value, const T2& /* other_operand */) {
+  return FormatForComparison<T1, T2>::Format(value);
+}
+
+// UniversalPrinter<T>::Print(value, ostream_ptr) prints the given
+// value to the given ostream.  The caller must ensure that
+// 'ostream_ptr' is not NULL, or the behavior is undefined.
+//
+// We define UniversalPrinter as a class template (as opposed to a
+// function template), as we need to partially specialize it for
+// reference types, which cannot be done with function templates.
+template <typename T>
+class UniversalPrinter;
+
+template <typename T>
+void UniversalPrint(const T& value, ::std::ostream* os);
+
+// Used to print an STL-style container when the user doesn't define
+// a PrintTo() for it.
+template <typename C>
+void DefaultPrintTo(IsContainer /* dummy */,
+                    false_type /* is not a pointer */,
+                    const C& container, ::std::ostream* os) {
+  const size_t kMaxCount = 32;  // The maximum number of elements to print.
+  *os << '{';
+  size_t count = 0;
+  for (typename C::const_iterator it = container.begin();
+       it != container.end(); ++it, ++count) {
+    if (count > 0) {
+      *os << ',';
+      if (count == kMaxCount) {  // Enough has been printed.
+        *os << " ...";
+        break;
+      }
+    }
+    *os << ' ';
+    // We cannot call PrintTo(*it, os) here as PrintTo() doesn't
+    // handle *it being a native array.
+    internal::UniversalPrint(*it, os);
+  }
+
+  if (count > 0) {
+    *os << ' ';
+  }
+  *os << '}';
+}
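+
+// For illustration: with the overload above, a std::vector<int> holding
+// 1, 2, 3 prints as "{ 1, 2, 3 }", an empty container prints as "{}", and
+// containers with more than 32 elements are cut off with ", ..." after the
+// first 32 elements.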
+
+// Used to print a pointer that is neither a char pointer nor a member
+// pointer, when the user doesn't define PrintTo() for it.  (A member
+// variable pointer or member function pointer doesn't really point to
+// a location in the address space.  Their representation is
+// implementation-defined.  Therefore they will be printed as raw
+// bytes.)
+template <typename T>
+void DefaultPrintTo(IsNotContainer /* dummy */,
+                    true_type /* is a pointer */,
+                    T* p, ::std::ostream* os) {
+  if (p == NULL) {
+    *os << "NULL";
+  } else {
+    // C++ doesn't allow casting from a function pointer to any object
+    // pointer.
+    //
+    // IsTrue() silences warnings: "Condition is always true",
+    // "unreachable code".
+    if (IsTrue(ImplicitlyConvertible<T*, const void*>::value)) {
+      // T is not a function type.  We just call << to print p,
+      // relying on ADL to pick up user-defined << for their pointer
+      // types, if any.
+      *os << p;
+    } else {
+      // T is a function type, so '*os << p' doesn't do what we want
+      // (it just prints p as bool).  We want to print p as a const
+      // void*.  However, we cannot cast it to const void* directly,
+      // even using reinterpret_cast, as earlier versions of gcc
+      // (e.g. 3.4.5) cannot compile the cast when p is a function
+      // pointer.  Casting to UInt64 first solves the problem.
+      *os << reinterpret_cast<const void*>(
+          reinterpret_cast<internal::UInt64>(p));
+    }
+  }
+}
+
+// Used to print a non-container, non-pointer value when the user
+// doesn't define PrintTo() for it.
+template <typename T>
+void DefaultPrintTo(IsNotContainer /* dummy */,
+                    false_type /* is not a pointer */,
+                    const T& value, ::std::ostream* os) {
+  ::testing_internal::DefaultPrintNonContainerTo(value, os);
+}
+
+// Prints the given value using the << operator if it has one;
+// otherwise prints the bytes in it.  This is what
+// UniversalPrinter<T>::Print() does when PrintTo() is not specialized
+// or overloaded for type T.
+//
+// A user can override this behavior for a class type Foo by defining
+// an overload of PrintTo() in the namespace where Foo is defined.  We
+// give the user this option as sometimes defining a << operator for
+// Foo is not desirable (e.g. the coding style may prevent doing it,
+// or there is already a << operator but it doesn't do what the user
+// wants).
+template <typename T>
+void PrintTo(const T& value, ::std::ostream* os) {
+  // DefaultPrintTo() is overloaded.  The type of its first two
+  // arguments determine which version will be picked.  If T is an
+  // STL-style container, the version for container will be called; if
+  // T is a pointer, the pointer version will be called; otherwise the
+  // generic version will be called.
+  //
+  // Note that we check for container types here, before we check for
+  // protocol message types in our operator<<.  The rationale is:
+  //
+  // For protocol messages, we want to give people a chance to
+  // override Google Mock's format by defining a PrintTo() or
+  // operator<<.  For STL containers, other formats can be
+  // incompatible with Google Mock's format for the container
+  // elements; therefore we check for container types here to ensure
+  // that our format is used.
+  //
+  // The second argument of DefaultPrintTo() is needed to bypass a bug
+  // in Symbian's C++ compiler that prevents it from picking the right
+  // overload between:
+  //
+  //   PrintTo(const T& x, ...);
+  //   PrintTo(T* x, ...);
+  DefaultPrintTo(IsContainerTest<T>(0), is_pointer<T>(), value, os);
+}
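+
+// Alternatively (an illustrative sketch; Point is a hypothetical type),
+// a regular stream inserter defined in the type's namespace achieves the
+// same effect:
+//
+//   ::std::ostream& operator<<(::std::ostream& os, const Point& p) {
+//     return os << "(" << p.x << ", " << p.y << ")";
+//   }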
+
+// The following list of PrintTo() overloads tells
+// UniversalPrinter<T>::Print() how to print standard types (built-in
+// types, strings, plain arrays, and pointers).
+
+// Overloads for various char types.
+GTEST_API_ void PrintTo(unsigned char c, ::std::ostream* os);
+GTEST_API_ void PrintTo(signed char c, ::std::ostream* os);
+inline void PrintTo(char c, ::std::ostream* os) {
+  // When printing a plain char, we always treat it as unsigned.  This
+  // way, the output won't be affected by whether the compiler thinks
+  // char is signed or not.
+  PrintTo(static_cast<unsigned char>(c), os);
+}
+
+// Overloads for other simple built-in types.
+inline void PrintTo(bool x, ::std::ostream* os) {
+  *os << (x ? "true" : "false");
+}
+
+// Overload for wchar_t type.
+// Prints a wchar_t as a symbol if it is printable or as its internal
+// code otherwise and also as its decimal code (except for L'\0').
+// The L'\0' char is printed as "L'\\0'". The decimal code is printed
+// as signed integer when wchar_t is implemented by the compiler
+// as a signed type and is printed as an unsigned integer when wchar_t
+// is implemented as an unsigned type.
+GTEST_API_ void PrintTo(wchar_t wc, ::std::ostream* os);
+
+// Overloads for C strings.
+GTEST_API_ void PrintTo(const char* s, ::std::ostream* os);
+inline void PrintTo(char* s, ::std::ostream* os) {
+  PrintTo(ImplicitCast_<const char*>(s), os);
+}
+
+// signed/unsigned char is often used for representing binary data, so
+// we print pointers to it as void* to be safe.
+inline void PrintTo(const signed char* s, ::std::ostream* os) {
+  PrintTo(ImplicitCast_<const void*>(s), os);
+}
+inline void PrintTo(signed char* s, ::std::ostream* os) {
+  PrintTo(ImplicitCast_<const void*>(s), os);
+}
+inline void PrintTo(const unsigned char* s, ::std::ostream* os) {
+  PrintTo(ImplicitCast_<const void*>(s), os);
+}
+inline void PrintTo(unsigned char* s, ::std::ostream* os) {
+  PrintTo(ImplicitCast_<const void*>(s), os);
+}
+
+// MSVC can be configured to define wchar_t as a typedef of unsigned
+// short.  It defines _NATIVE_WCHAR_T_DEFINED when wchar_t is a native
+// type.  When wchar_t is a typedef, defining an overload for const
+// wchar_t* would cause unsigned short* to be printed as a wide string,
+// possibly causing invalid memory accesses.
+#if !defined(_MSC_VER) || defined(_NATIVE_WCHAR_T_DEFINED)
+// Overloads for wide C strings
+GTEST_API_ void PrintTo(const wchar_t* s, ::std::ostream* os);
+inline void PrintTo(wchar_t* s, ::std::ostream* os) {
+  PrintTo(ImplicitCast_<const wchar_t*>(s), os);
+}
+#endif
+
+// Overload for C arrays.  Multi-dimensional arrays are printed
+// properly.
+
+// Prints the given number of elements in an array, without printing
+// the curly braces.
+template <typename T>
+void PrintRawArrayTo(const T a[], size_t count, ::std::ostream* os) {
+  UniversalPrint(a[0], os);
+  for (size_t i = 1; i != count; i++) {
+    *os << ", ";
+    UniversalPrint(a[i], os);
+  }
+}
+
+// Overloads for ::string and ::std::string.
+#if GTEST_HAS_GLOBAL_STRING
+GTEST_API_ void PrintStringTo(const ::string&s, ::std::ostream* os);
+inline void PrintTo(const ::string& s, ::std::ostream* os) {
+  PrintStringTo(s, os);
+}
+#endif  // GTEST_HAS_GLOBAL_STRING
+
+GTEST_API_ void PrintStringTo(const ::std::string&s, ::std::ostream* os);
+inline void PrintTo(const ::std::string& s, ::std::ostream* os) {
+  PrintStringTo(s, os);
+}
+
+// Overloads for ::wstring and ::std::wstring.
+#if GTEST_HAS_GLOBAL_WSTRING
+GTEST_API_ void PrintWideStringTo(const ::wstring&s, ::std::ostream* os);
+inline void PrintTo(const ::wstring& s, ::std::ostream* os) {
+  PrintWideStringTo(s, os);
+}
+#endif  // GTEST_HAS_GLOBAL_WSTRING
+
+#if GTEST_HAS_STD_WSTRING
+GTEST_API_ void PrintWideStringTo(const ::std::wstring&s, ::std::ostream* os);
+inline void PrintTo(const ::std::wstring& s, ::std::ostream* os) {
+  PrintWideStringTo(s, os);
+}
+#endif  // GTEST_HAS_STD_WSTRING
+
+#if GTEST_HAS_TR1_TUPLE || GTEST_HAS_STD_TUPLE_
+// Helper function for printing a tuple.  T must be instantiated with
+// a tuple type.
+template <typename T>
+void PrintTupleTo(const T& t, ::std::ostream* os);
+#endif  // GTEST_HAS_TR1_TUPLE || GTEST_HAS_STD_TUPLE_
+
+#if GTEST_HAS_TR1_TUPLE
+// Overload for ::std::tr1::tuple.  Needed for printing function arguments,
+// which are packed as tuples.
+
+// Overloaded PrintTo() for tuples of various arities.  We support
+// tuples of up to 10 fields.  The following implementation works
+// regardless of whether tr1::tuple is implemented using the
+// non-standard variadic template feature or not.
+
+inline void PrintTo(const ::std::tr1::tuple<>& t, ::std::ostream* os) {
+  PrintTupleTo(t, os);
+}
+
+template <typename T1>
+void PrintTo(const ::std::tr1::tuple<T1>& t, ::std::ostream* os) {
+  PrintTupleTo(t, os);
+}
+
+template <typename T1, typename T2>
+void PrintTo(const ::std::tr1::tuple<T1, T2>& t, ::std::ostream* os) {
+  PrintTupleTo(t, os);
+}
+
+template <typename T1, typename T2, typename T3>
+void PrintTo(const ::std::tr1::tuple<T1, T2, T3>& t, ::std::ostream* os) {
+  PrintTupleTo(t, os);
+}
+
+template <typename T1, typename T2, typename T3, typename T4>
+void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4>& t, ::std::ostream* os) {
+  PrintTupleTo(t, os);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4, T5>& t,
+             ::std::ostream* os) {
+  PrintTupleTo(t, os);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+          typename T6>
+void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4, T5, T6>& t,
+             ::std::ostream* os) {
+  PrintTupleTo(t, os);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+          typename T6, typename T7>
+void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7>& t,
+             ::std::ostream* os) {
+  PrintTupleTo(t, os);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+          typename T6, typename T7, typename T8>
+void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8>& t,
+             ::std::ostream* os) {
+  PrintTupleTo(t, os);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+          typename T6, typename T7, typename T8, typename T9>
+void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9>& t,
+             ::std::ostream* os) {
+  PrintTupleTo(t, os);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+          typename T6, typename T7, typename T8, typename T9, typename T10>
+void PrintTo(
+    const ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>& t,
+    ::std::ostream* os) {
+  PrintTupleTo(t, os);
+}
+#endif  // GTEST_HAS_TR1_TUPLE
+
+#if GTEST_HAS_STD_TUPLE_
+template <typename... Types>
+void PrintTo(const ::std::tuple<Types...>& t, ::std::ostream* os) {
+  PrintTupleTo(t, os);
+}
+#endif  // GTEST_HAS_STD_TUPLE_
+
+// Overload for std::pair.
+template <typename T1, typename T2>
+void PrintTo(const ::std::pair<T1, T2>& value, ::std::ostream* os) {
+  *os << '(';
+  // We cannot use UniversalPrint(value.first, os) here, as T1 may be
+  // a reference type.  The same for printing value.second.
+  UniversalPrinter<T1>::Print(value.first, os);
+  *os << ", ";
+  UniversalPrinter<T2>::Print(value.second, os);
+  *os << ')';
+}
+
+// Implements printing a non-reference type T by letting the compiler
+// pick the right overload of PrintTo() for T.
+template <typename T>
+class UniversalPrinter {
+ public:
+  // MSVC warns about adding const to a function type, so we want to
+  // disable the warning.
+  GTEST_DISABLE_MSC_WARNINGS_PUSH_(4180)
+
+  // Note: we deliberately don't call this PrintTo(), as that name
+  // conflicts with ::testing::internal::PrintTo in the body of the
+  // function.
+  static void Print(const T& value, ::std::ostream* os) {
+    // By default, ::testing::internal::PrintTo() is used for printing
+    // the value.
+    //
+    // Thanks to Koenig look-up, if T is a class and has its own
+    // PrintTo() function defined in its namespace, that function will
+    // be visible here.  Since it is more specific than the generic ones
+    // in ::testing::internal, it will be picked by the compiler in the
+    // following statement - exactly what we want.
+    PrintTo(value, os);
+  }
+
+  GTEST_DISABLE_MSC_WARNINGS_POP_()
+};
+
+// UniversalPrintArray(begin, len, os) prints an array of 'len'
+// elements, starting at address 'begin'.
+template <typename T>
+void UniversalPrintArray(const T* begin, size_t len, ::std::ostream* os) {
+  if (len == 0) {
+    *os << "{}";
+  } else {
+    *os << "{ ";
+    const size_t kThreshold = 18;
+    const size_t kChunkSize = 8;
+    // If the array has more than kThreshold elements, we'll have to
+    // omit some details by printing only the first and the last
+    // kChunkSize elements.
+    // TODO(wan@google.com): let the user control the threshold using a flag.
+    if (len <= kThreshold) {
+      PrintRawArrayTo(begin, len, os);
+    } else {
+      PrintRawArrayTo(begin, kChunkSize, os);
+      *os << ", ..., ";
+      PrintRawArrayTo(begin + len - kChunkSize, kChunkSize, os);
+    }
+    *os << " }";
+  }
+}
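+
+// For illustration: an array holding the ints 0..19 prints as
+// "{ 0, 1, 2, 3, 4, 5, 6, 7, ..., 12, 13, 14, 15, 16, 17, 18, 19 }",
+// while arrays of 18 or fewer elements are printed in full.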
+// This overload prints a (const) char array compactly.
+GTEST_API_ void UniversalPrintArray(
+    const char* begin, size_t len, ::std::ostream* os);
+
+// This overload prints a (const) wchar_t array compactly.
+GTEST_API_ void UniversalPrintArray(
+    const wchar_t* begin, size_t len, ::std::ostream* os);
+
+// Implements printing an array type T[N].
+template <typename T, size_t N>
+class UniversalPrinter<T[N]> {
+ public:
+  // Prints the given array, omitting some elements when there are too
+  // many.
+  static void Print(const T (&a)[N], ::std::ostream* os) {
+    UniversalPrintArray(a, N, os);
+  }
+};
+
+// Implements printing a reference type T&.
+template <typename T>
+class UniversalPrinter<T&> {
+ public:
+  // MSVC warns about adding const to a function type, so we want to
+  // disable the warning.
+  GTEST_DISABLE_MSC_WARNINGS_PUSH_(4180)
+
+  static void Print(const T& value, ::std::ostream* os) {
+    // Prints the address of the value.  We use reinterpret_cast here
+    // as static_cast doesn't compile when T is a function type.
+    *os << "@" << reinterpret_cast<const void*>(&value) << " ";
+
+    // Then prints the value itself.
+    UniversalPrint(value, os);
+  }
+
+  GTEST_DISABLE_MSC_WARNINGS_POP_()
+};
+
+// Prints a value tersely: for a reference type, the referenced value
+// (but not the address) is printed; for a (const) char pointer, the
+// NUL-terminated string (but not the pointer) is printed.
+
+template <typename T>
+class UniversalTersePrinter {
+ public:
+  static void Print(const T& value, ::std::ostream* os) {
+    UniversalPrint(value, os);
+  }
+};
+template <typename T>
+class UniversalTersePrinter<T&> {
+ public:
+  static void Print(const T& value, ::std::ostream* os) {
+    UniversalPrint(value, os);
+  }
+};
+template <typename T, size_t N>
+class UniversalTersePrinter<T[N]> {
+ public:
+  static void Print(const T (&value)[N], ::std::ostream* os) {
+    UniversalPrinter<T[N]>::Print(value, os);
+  }
+};
+template <>
+class UniversalTersePrinter<const char*> {
+ public:
+  static void Print(const char* str, ::std::ostream* os) {
+    if (str == NULL) {
+      *os << "NULL";
+    } else {
+      UniversalPrint(string(str), os);
+    }
+  }
+};
+template <>
+class UniversalTersePrinter<char*> {
+ public:
+  static void Print(char* str, ::std::ostream* os) {
+    UniversalTersePrinter<const char*>::Print(str, os);
+  }
+};
+
+#if GTEST_HAS_STD_WSTRING
+template <>
+class UniversalTersePrinter<const wchar_t*> {
+ public:
+  static void Print(const wchar_t* str, ::std::ostream* os) {
+    if (str == NULL) {
+      *os << "NULL";
+    } else {
+      UniversalPrint(::std::wstring(str), os);
+    }
+  }
+};
+#endif
+
+template <>
+class UniversalTersePrinter<wchar_t*> {
+ public:
+  static void Print(wchar_t* str, ::std::ostream* os) {
+    UniversalTersePrinter<const wchar_t*>::Print(str, os);
+  }
+};
+
+template <typename T>
+void UniversalTersePrint(const T& value, ::std::ostream* os) {
+  UniversalTersePrinter<T>::Print(value, os);
+}
+
+// Prints a value using the type inferred by the compiler.  The
+// difference between this and UniversalTersePrint() is that for a
+// (const) char pointer, this prints both the pointer and the
+// NUL-terminated string.
+template <typename T>
+void UniversalPrint(const T& value, ::std::ostream* os) {
+  // A workaround for the bug in VC++ 7.1 that prevents us from instantiating
+  // UniversalPrinter with T directly.
+  typedef T T1;
+  UniversalPrinter<T1>::Print(value, os);
+}
+
+typedef ::std::vector<string> Strings;
+
+// TuplePolicy<TupleT> must provide:
+// - tuple_size
+//     size of tuple TupleT.
+// - get<size_t I>(const TupleT& t)
+//     static function extracting element I of tuple TupleT.
+// - tuple_element<size_t I>::type
+//     type of element I of tuple TupleT.
+template <typename TupleT>
+struct TuplePolicy;
+
+#if GTEST_HAS_TR1_TUPLE
+template <typename TupleT>
+struct TuplePolicy {
+  typedef TupleT Tuple;
+  static const size_t tuple_size = ::std::tr1::tuple_size<Tuple>::value;
+
+  template <size_t I>
+  struct tuple_element : ::std::tr1::tuple_element<I, Tuple> {};
+
+  template <size_t I>
+  static typename AddReference<
+      const typename ::std::tr1::tuple_element<I, Tuple>::type>::type get(
+      const Tuple& tuple) {
+    return ::std::tr1::get<I>(tuple);
+  }
+};
+template <typename TupleT>
+const size_t TuplePolicy<TupleT>::tuple_size;
+#endif  // GTEST_HAS_TR1_TUPLE
+
+#if GTEST_HAS_STD_TUPLE_
+template <typename... Types>
+struct TuplePolicy< ::std::tuple<Types...> > {
+  typedef ::std::tuple<Types...> Tuple;
+  static const size_t tuple_size = ::std::tuple_size<Tuple>::value;
+
+  template <size_t I>
+  struct tuple_element : ::std::tuple_element<I, Tuple> {};
+
+  template <size_t I>
+  static const typename ::std::tuple_element<I, Tuple>::type& get(
+      const Tuple& tuple) {
+    return ::std::get<I>(tuple);
+  }
+};
+template <typename... Types>
+const size_t TuplePolicy< ::std::tuple<Types...> >::tuple_size;
+#endif  // GTEST_HAS_STD_TUPLE_
+
+#if GTEST_HAS_TR1_TUPLE || GTEST_HAS_STD_TUPLE_
+// This helper template allows PrintTo() for tuples and
+// UniversalTersePrintTupleFieldsToStrings() to be defined by
+// induction on the number of tuple fields.  The idea is that
+// TuplePrefixPrinter<N>::PrintPrefixTo(t, os) prints the first N
+// fields in tuple t, and can be defined in terms of
+// TuplePrefixPrinter<N - 1>.
+//
+// The inductive case.
+template <size_t N>
+struct TuplePrefixPrinter {
+  // Prints the first N fields of a tuple.
+  template <typename Tuple>
+  static void PrintPrefixTo(const Tuple& t, ::std::ostream* os) {
+    TuplePrefixPrinter<N - 1>::PrintPrefixTo(t, os);
+    GTEST_INTENTIONAL_CONST_COND_PUSH_()
+    if (N > 1) {
+    GTEST_INTENTIONAL_CONST_COND_POP_()
+      *os << ", ";
+    }
+    UniversalPrinter<
+        typename TuplePolicy<Tuple>::template tuple_element<N - 1>::type>
+        ::Print(TuplePolicy<Tuple>::template get<N - 1>(t), os);
+  }
+
+  // Tersely prints the first N fields of a tuple to a string vector,
+  // one element for each field.
+  template <typename Tuple>
+  static void TersePrintPrefixToStrings(const Tuple& t, Strings* strings) {
+    TuplePrefixPrinter<N - 1>::TersePrintPrefixToStrings(t, strings);
+    ::std::stringstream ss;
+    UniversalTersePrint(TuplePolicy<Tuple>::template get<N - 1>(t), &ss);
+    strings->push_back(ss.str());
+  }
+};
+
+// Base case.
+template <>
+struct TuplePrefixPrinter<0> {
+  template <typename Tuple>
+  static void PrintPrefixTo(const Tuple&, ::std::ostream*) {}
+
+  template <typename Tuple>
+  static void TersePrintPrefixToStrings(const Tuple&, Strings*) {}
+};
+
+// Helper function for printing a tuple.
+// Tuple must be either std::tr1::tuple or std::tuple type.
+template <typename Tuple>
+void PrintTupleTo(const Tuple& t, ::std::ostream* os) {
+  *os << "(";
+  TuplePrefixPrinter<TuplePolicy<Tuple>::tuple_size>::PrintPrefixTo(t, os);
+  *os << ")";
+}
+
+// Prints the fields of a tuple tersely to a string vector, one
+// element for each field.  See the comment before
+// UniversalTersePrint() for how we define "tersely".
+template <typename Tuple>
+Strings UniversalTersePrintTupleFieldsToStrings(const Tuple& value) {
+  Strings result;
+  TuplePrefixPrinter<TuplePolicy<Tuple>::tuple_size>::
+      TersePrintPrefixToStrings(value, &result);
+  return result;
+}
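+
+// For illustration, a minimal sketch of how the two helpers above behave for
+// a two-field tuple (assuming ::std::tuple is available; the individual
+// fields are formatted by the UniversalPrinter rules defined earlier in this
+// header):
+//
+//   ::std::tuple<int, ::std::string> t(1, "foo");
+//   ::std::stringstream ss;
+//   PrintTupleTo(t, &ss);  // ss.str() is "(1, \"foo\")".
+//   Strings fields = UniversalTersePrintTupleFieldsToStrings(t);
+//   // fields[0] == "1" and fields[1] == "\"foo\"".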
+#endif  // GTEST_HAS_TR1_TUPLE || GTEST_HAS_STD_TUPLE_
+
+}  // namespace internal
+
+template <typename T>
+::std::string PrintToString(const T& value) {
+  ::std::stringstream ss;
+  internal::UniversalTersePrinter<T>::Print(value, &ss);
+  return ss.str();
+}
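+
+// For example, a couple of typical results (string values are printed in
+// quotes, following the printer rules above):
+//
+//   PrintToString(42);                   // Returns "42".
+//   PrintToString(::std::string("hi"));  // Returns "\"hi\"".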
+
+}  // namespace testing
+
+// Include any custom printer added by the local installation.
+// We must include this header at the end to make sure it can use the
+// declarations from this file.
+// Copyright 2015, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// This file provides an injection point for custom printers in a local
+// installation of gTest.
+// It will be included from gtest-printers.h and the overrides in this file
+// will be visible to everyone.
+// See documentation at gtest/gtest-printers.h for details on how to define a
+// custom printer.
+//
+// ** Custom implementation starts here **
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_PRINTERS_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_PRINTERS_H_
+
+#endif  // GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_PRINTERS_H_
+
+#endif  // GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_
+
+#if GTEST_HAS_PARAM_TEST
+
+namespace testing {
+
+// Input to a parameterized test name generator, describing a test parameter.
+// Consists of the parameter value and the integer parameter index.
+template <class ParamType>
+struct TestParamInfo {
+  TestParamInfo(const ParamType& a_param, size_t an_index) :
+    param(a_param),
+    index(an_index) {}
+  ParamType param;
+  size_t index;
+};
+
+// A builtin parameterized test name generator which returns the result of
+// testing::PrintToString.
+struct PrintToStringParamName {
+  template <class ParamType>
+  std::string operator()(const TestParamInfo<ParamType>& info) const {
+    return PrintToString(info.param);
+  }
+};
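+
+// For illustration, a sketch of how this functor is typically supplied as the
+// optional last argument of INSTANTIATE_TEST_CASE_P (MyTest and the values
+// are assumed example names; the printed parameter values must consist of
+// valid test name characters):
+//
+//   INSTANTIATE_TEST_CASE_P(MyInstantiation, MyTest,
+//                           ::testing::Values(1, 2, 3),
+//                           ::testing::PrintToStringParamName());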
+
+namespace internal {
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Outputs a message explaining invalid registration of different
+// fixture class for the same test case. This may happen when
+// TEST_P macro is used to define two tests with the same name
+// but in different namespaces.
+GTEST_API_ void ReportInvalidTestCaseType(const char* test_case_name,
+                                          CodeLocation code_location);
+
+template <typename> class ParamGeneratorInterface;
+template <typename> class ParamGenerator;
+
+// Interface for iterating over elements provided by an implementation
+// of ParamGeneratorInterface<T>.
+template <typename T>
+class ParamIteratorInterface {
+ public:
+  virtual ~ParamIteratorInterface() {}
+  // A pointer to the base generator instance.
+  // Used only for the purposes of iterator comparison
+  // to make sure that two iterators belong to the same generator.
+  virtual const ParamGeneratorInterface<T>* BaseGenerator() const = 0;
+  // Advances iterator to point to the next element
+  // provided by the generator. The caller is responsible
+  // for not calling Advance() on an iterator equal to
+  // BaseGenerator()->End().
+  virtual void Advance() = 0;
+  // Clones the iterator object. Used for implementing copy semantics
+  // of ParamIterator<T>.
+  virtual ParamIteratorInterface* Clone() const = 0;
+  // Dereferences the current iterator and provides (read-only) access
+  // to the pointed value. It is the caller's responsibility not to call
+  // Current() on an iterator equal to BaseGenerator()->End().
+  // Used for implementing ParamGenerator<T>::operator*().
+  virtual const T* Current() const = 0;
+  // Determines whether the given iterator and other point to the same
+  // element in the sequence generated by the generator.
+  // Used for implementing ParamGenerator<T>::operator==().
+  virtual bool Equals(const ParamIteratorInterface& other) const = 0;
+};
+
+// Class iterating over elements provided by an implementation of
+// ParamGeneratorInterface<T>. It wraps ParamIteratorInterface<T>
+// and implements the const forward iterator concept.
+template <typename T>
+class ParamIterator {
+ public:
+  typedef T value_type;
+  typedef const T& reference;
+  typedef ptrdiff_t difference_type;
+
+  // ParamIterator assumes ownership of the impl_ pointer.
+  ParamIterator(const ParamIterator& other) : impl_(other.impl_->Clone()) {}
+  ParamIterator& operator=(const ParamIterator& other) {
+    if (this != &other)
+      impl_.reset(other.impl_->Clone());
+    return *this;
+  }
+
+  const T& operator*() const { return *impl_->Current(); }
+  const T* operator->() const { return impl_->Current(); }
+  // Prefix version of operator++.
+  ParamIterator& operator++() {
+    impl_->Advance();
+    return *this;
+  }
+  // Postfix version of operator++.
+  ParamIterator operator++(int /*unused*/) {
+    ParamIteratorInterface<T>* clone = impl_->Clone();
+    impl_->Advance();
+    return ParamIterator(clone);
+  }
+  bool operator==(const ParamIterator& other) const {
+    return impl_.get() == other.impl_.get() || impl_->Equals(*other.impl_);
+  }
+  bool operator!=(const ParamIterator& other) const {
+    return !(*this == other);
+  }
+
+ private:
+  friend class ParamGenerator<T>;
+  explicit ParamIterator(ParamIteratorInterface<T>* impl) : impl_(impl) {}
+  scoped_ptr<ParamIteratorInterface<T> > impl_;
+};
+
+// ParamGeneratorInterface<T> is the binary interface to access generators
+// defined in other translation units.
+template <typename T>
+class ParamGeneratorInterface {
+ public:
+  typedef T ParamType;
+
+  virtual ~ParamGeneratorInterface() {}
+
+  // Generator interface definition
+  virtual ParamIteratorInterface<T>* Begin() const = 0;
+  virtual ParamIteratorInterface<T>* End() const = 0;
+};
+
+// Wraps ParamGeneratorInterface<T> and provides general generator syntax
+// compatible with the STL Container concept.
+// This class implements copy initialization semantics and the contained
+// ParamGeneratorInterface<T> instance is shared among all copies
+// of the original object. This is possible because that instance is immutable.
+template<typename T>
+class ParamGenerator {
+ public:
+  typedef ParamIterator<T> iterator;
+
+  explicit ParamGenerator(ParamGeneratorInterface<T>* impl) : impl_(impl) {}
+  ParamGenerator(const ParamGenerator& other) : impl_(other.impl_) {}
+
+  ParamGenerator& operator=(const ParamGenerator& other) {
+    impl_ = other.impl_;
+    return *this;
+  }
+
+  iterator begin() const { return iterator(impl_->Begin()); }
+  iterator end() const { return iterator(impl_->End()); }
+
+ private:
+  linked_ptr<const ParamGeneratorInterface<T> > impl_;
+};
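+
+// For illustration, a minimal sketch of the container-like iteration this
+// wrapper enables (Range() is the factory declared in gtest-param-test.h):
+//
+//   ParamGenerator<int> gen = ::testing::Range(0, 3);
+//   for (ParamGenerator<int>::iterator it = gen.begin();
+//        it != gen.end(); ++it) {
+//     // *it takes the values 0, 1 and 2 in turn.
+//   }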
+
+// Generates values from a range of two comparable values. Can be used to
+// generate sequences of user-defined types that implement operator+() and
+// operator<().
+// This class is used in the Range() function.
+template <typename T, typename IncrementT>
+class RangeGenerator : public ParamGeneratorInterface<T> {
+ public:
+  RangeGenerator(T begin, T end, IncrementT step)
+      : begin_(begin), end_(end),
+        step_(step), end_index_(CalculateEndIndex(begin, end, step)) {}
+  virtual ~RangeGenerator() {}
+
+  virtual ParamIteratorInterface<T>* Begin() const {
+    return new Iterator(this, begin_, 0, step_);
+  }
+  virtual ParamIteratorInterface<T>* End() const {
+    return new Iterator(this, end_, end_index_, step_);
+  }
+
+ private:
+  class Iterator : public ParamIteratorInterface<T> {
+   public:
+    Iterator(const ParamGeneratorInterface<T>* base, T value, int index,
+             IncrementT step)
+        : base_(base), value_(value), index_(index), step_(step) {}
+    virtual ~Iterator() {}
+
+    virtual const ParamGeneratorInterface<T>* BaseGenerator() const {
+      return base_;
+    }
+    virtual void Advance() {
+      value_ = static_cast<T>(value_ + step_);
+      index_++;
+    }
+    virtual ParamIteratorInterface<T>* Clone() const {
+      return new Iterator(*this);
+    }
+    virtual const T* Current() const { return &value_; }
+    virtual bool Equals(const ParamIteratorInterface<T>& other) const {
+      // Having the same base generator guarantees that the other
+      // iterator is of the same type and we can downcast.
+      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+          << "The program attempted to compare iterators "
+          << "from different generators." << std::endl;
+      const int other_index =
+          CheckedDowncastToActualType<const Iterator>(&other)->index_;
+      return index_ == other_index;
+    }
+
+   private:
+    Iterator(const Iterator& other)
+        : ParamIteratorInterface<T>(),
+          base_(other.base_), value_(other.value_), index_(other.index_),
+          step_(other.step_) {}
+
+    // No implementation - assignment is unsupported.
+    void operator=(const Iterator& other);
+
+    const ParamGeneratorInterface<T>* const base_;
+    T value_;
+    int index_;
+    const IncrementT step_;
+  };  // class RangeGenerator::Iterator
+
+  static int CalculateEndIndex(const T& begin,
+                               const T& end,
+                               const IncrementT& step) {
+    int end_index = 0;
+    for (T i = begin; i < end; i = static_cast<T>(i + step))
+      end_index++;
+    return end_index;
+  }
+
+  // No implementation - assignment is unsupported.
+  void operator=(const RangeGenerator& other);
+
+  const T begin_;
+  const T end_;
+  const IncrementT step_;
+  // The index for the end() iterator. All the elements in the generated
+  // sequence are indexed (0-based) to aid iterator comparison.
+  const int end_index_;
+};  // class RangeGenerator
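+
+// For example, Range(0, 10, 3) is built on top of this generator and yields
+// the sequence 0, 3, 6, 9; the end value itself is never produced.  A direct
+// sketch (normally the generator is only created through Range()):
+//
+//   RangeGenerator<int, int> gen(0, 10, 3);
+//   // Iterating from gen.Begin() to gen.End() visits 0, 3, 6, 9.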
+
+
+// Generates values from a pair of STL-style iterators. Used in the
+// ValuesIn() function. The elements are copied from the source range
+// since the source can be located on the stack, and the generator
+// is likely to persist beyond that stack frame.
+template <typename T>
+class ValuesInIteratorRangeGenerator : public ParamGeneratorInterface<T> {
+ public:
+  template <typename ForwardIterator>
+  ValuesInIteratorRangeGenerator(ForwardIterator begin, ForwardIterator end)
+      : container_(begin, end) {}
+  virtual ~ValuesInIteratorRangeGenerator() {}
+
+  virtual ParamIteratorInterface<T>* Begin() const {
+    return new Iterator(this, container_.begin());
+  }
+  virtual ParamIteratorInterface<T>* End() const {
+    return new Iterator(this, container_.end());
+  }
+
+ private:
+  typedef typename ::std::vector<T> ContainerType;
+
+  class Iterator : public ParamIteratorInterface<T> {
+   public:
+    Iterator(const ParamGeneratorInterface<T>* base,
+             typename ContainerType::const_iterator iterator)
+        : base_(base), iterator_(iterator) {}
+    virtual ~Iterator() {}
+
+    virtual const ParamGeneratorInterface<T>* BaseGenerator() const {
+      return base_;
+    }
+    virtual void Advance() {
+      ++iterator_;
+      value_.reset();
+    }
+    virtual ParamIteratorInterface<T>* Clone() const {
+      return new Iterator(*this);
+    }
+    // We need to use cached value referenced by iterator_ because *iterator_
+    // can return a temporary object (and of a type other than T), so just
+    // having "return &*iterator_;" doesn't work.
+    // value_ is updated here and not in Advance() because Advance()
+    // can advance iterator_ beyond the end of the range, and we cannot
+    // detect that fact. The client code, on the other hand, is
+    // responsible for not calling Current() on an out-of-range iterator.
+    virtual const T* Current() const {
+      if (value_.get() == NULL)
+        value_.reset(new T(*iterator_));
+      return value_.get();
+    }
+    virtual bool Equals(const ParamIteratorInterface<T>& other) const {
+      // Having the same base generator guarantees that the other
+      // iterator is of the same type and we can downcast.
+      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+          << "The program attempted to compare iterators "
+          << "from different generators." << std::endl;
+      return iterator_ ==
+          CheckedDowncastToActualType<const Iterator>(&other)->iterator_;
+    }
+
+   private:
+    Iterator(const Iterator& other)
+          // The explicit constructor call suppresses a false warning
+          // emitted by gcc when supplied with the -Wextra option.
+        : ParamIteratorInterface<T>(),
+          base_(other.base_),
+          iterator_(other.iterator_) {}
+
+    const ParamGeneratorInterface<T>* const base_;
+    typename ContainerType::const_iterator iterator_;
+    // A cached value of *iterator_. We keep it here to allow access by
+    // pointer in the wrapping iterator's operator->().
+    // value_ needs to be mutable to be accessed in Current().
+    // Use of scoped_ptr helps manage cached value's lifetime,
+    // which is bound by the lifespan of the iterator itself.
+    mutable scoped_ptr<const T> value_;
+  };  // class ValuesInIteratorRangeGenerator::Iterator
+
+  // No implementation - assignment is unsupported.
+  void operator=(const ValuesInIteratorRangeGenerator& other);
+
+  const ContainerType container_;
+};  // class ValuesInIteratorRangeGenerator
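+
+// For illustration, a sketch of the ValuesIn() use case this generator backs
+// (the vector here is a hypothetical example):
+//
+//   ::std::vector<int> v;
+//   v.push_back(2);
+//   v.push_back(3);
+//   // The elements are copied out of v, so the resulting generator remains
+//   // valid even after v goes out of scope.
+//   ParamGenerator<int> gen = ::testing::ValuesIn(v);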
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Default parameterized test name generator, returns a string containing the
+// integer test parameter index.
+template <class ParamType>
+std::string DefaultParamName(const TestParamInfo<ParamType>& info) {
+  Message name_stream;
+  name_stream << info.index;
+  return name_stream.GetString();
+}
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Parameterized test name overload helpers, which help the
+// INSTANTIATE_TEST_CASE_P macro choose between the default parameterized
+// test name generator and a user-provided param name generator.
+template <class ParamType, class ParamNameGenFunctor>
+ParamNameGenFunctor GetParamNameGen(ParamNameGenFunctor func) {
+  return func;
+}
+
+template <class ParamType>
+struct ParamNameGenFunc {
+  typedef std::string Type(const TestParamInfo<ParamType>&);
+};
+
+template <class ParamType>
+typename ParamNameGenFunc<ParamType>::Type *GetParamNameGen() {
+  return DefaultParamName;
+}
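+
+// For illustration, a rough sketch of how the two overloads resolve (int is
+// an assumed example ParamType):
+//
+//   // With a user-supplied functor, the one-argument overload returns it:
+//   GetParamNameGen<int>(PrintToStringParamName());
+//   // With no functor, the zero-argument overload returns DefaultParamName:
+//   GetParamNameGen<int>();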
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Stores a parameter value and later creates tests parameterized with that
+// value.
+template <class TestClass>
+class ParameterizedTestFactory : public TestFactoryBase {
+ public:
+  typedef typename TestClass::ParamType ParamType;
+  explicit ParameterizedTestFactory(ParamType parameter) :
+      parameter_(parameter) {}
+  virtual Test* CreateTest() {
+    TestClass::SetParam(&parameter_);
+    return new TestClass();
+  }
+
+ private:
+  const ParamType parameter_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestFactory);
+};
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// TestMetaFactoryBase is a base class for meta-factories that create
+// test factories for passing into MakeAndRegisterTestInfo function.
+template <class ParamType>
+class TestMetaFactoryBase {
+ public:
+  virtual ~TestMetaFactoryBase() {}
+
+  virtual TestFactoryBase* CreateTestFactory(ParamType parameter) = 0;
+};
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// TestMetaFactory creates test factories for passing into the
+// MakeAndRegisterTestInfo function. Since MakeAndRegisterTestInfo receives
+// ownership of the test factory pointer, the same factory object cannot be
+// passed into that method twice. But ParameterizedTestCaseInfo is going to
+// call it for each Test/Parameter value combination. Thus it needs a
+// meta-factory creator class.
+template <class TestCase>
+class TestMetaFactory
+    : public TestMetaFactoryBase<typename TestCase::ParamType> {
+ public:
+  typedef typename TestCase::ParamType ParamType;
+
+  TestMetaFactory() {}
+
+  virtual TestFactoryBase* CreateTestFactory(ParamType parameter) {
+    return new ParameterizedTestFactory<TestCase>(parameter);
+  }
+
+ private:
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(TestMetaFactory);
+};
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// ParameterizedTestCaseInfoBase is a generic interface
+// to ParameterizedTestCaseInfo classes. ParameterizedTestCaseInfoBase
+// accumulates test information provided by TEST_P macro invocations
+// and generators provided by INSTANTIATE_TEST_CASE_P macro invocations
+// and uses that information to register all resulting test instances
+// in RegisterTests method. The ParameterizedTestCaseRegistry class holds
+// a collection of pointers to the ParameterizedTestCaseInfo objects
+// and calls RegisterTests() on each of them when asked.
+class ParameterizedTestCaseInfoBase {
+ public:
+  virtual ~ParameterizedTestCaseInfoBase() {}
+
+  // Base part of test case name for display purposes.
+  virtual const string& GetTestCaseName() const = 0;
+  // Test case id to verify identity.
+  virtual TypeId GetTestCaseTypeId() const = 0;
+  // UnitTest class invokes this method to register tests in this
+  // test case right before running them in RUN_ALL_TESTS macro.
+  // This method should not be called more than once on any single
+  // instance of a ParameterizedTestCaseInfoBase derived class.
+  virtual void RegisterTests() = 0;
+
+ protected:
+  ParameterizedTestCaseInfoBase() {}
+
+ private:
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseInfoBase);
+};
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// ParameterizedTestCaseInfo accumulates tests obtained from TEST_P
+// macro invocations for a particular test case and generators
+// obtained from INSTANTIATE_TEST_CASE_P macro invocations for that
+// test case. It registers tests with all values generated by all
+// generators when asked.
+template <class TestCase>
+class ParameterizedTestCaseInfo : public ParameterizedTestCaseInfoBase {
+ public:
+  // ParamType and GeneratorCreationFunc are private types but are required
+  // for declarations of public methods AddTestPattern() and
+  // AddTestCaseInstantiation().
+  typedef typename TestCase::ParamType ParamType;
+  // A function that returns an instance of appropriate generator type.
+  typedef ParamGenerator<ParamType>(GeneratorCreationFunc)();
+  typedef typename ParamNameGenFunc<ParamType>::Type ParamNameGeneratorFunc;
+
+  explicit ParameterizedTestCaseInfo(
+      const char* name, CodeLocation code_location)
+      : test_case_name_(name), code_location_(code_location) {}
+
+  // Test case base name for display purposes.
+  virtual const string& GetTestCaseName() const { return test_case_name_; }
+  // Test case id to verify identity.
+  virtual TypeId GetTestCaseTypeId() const { return GetTypeId<TestCase>(); }
+  // TEST_P macro uses AddTestPattern() to record information
+  // about a single test in a LocalTestInfo structure.
+  // test_case_name is the base name of the test case (without invocation
+  // prefix). test_base_name is the name of an individual test without
+  // parameter index. For the test SequenceA/FooTest.DoBar/1, FooTest is the
+  // test case base name and DoBar is the test base name.
+  void AddTestPattern(const char* test_case_name,
+                      const char* test_base_name,
+                      TestMetaFactoryBase<ParamType>* meta_factory) {
+    tests_.push_back(linked_ptr<TestInfo>(new TestInfo(test_case_name,
+                                                       test_base_name,
+                                                       meta_factory)));
+  }
+  // INSTANTIATE_TEST_CASE_P macro uses AddGenerator() to record information
+  // about a generator.
+  int AddTestCaseInstantiation(const string& instantiation_name,
+                               GeneratorCreationFunc* func,
+                               ParamNameGeneratorFunc* name_func,
+                               const char* file,
+                               int line) {
+    instantiations_.push_back(
+        InstantiationInfo(instantiation_name, func, name_func, file, line));
+    return 0;  // Return value used only to run this method in namespace scope.
+  }
+  // UnitTest class invokes this method to register tests in this test case
+  // right before running tests in RUN_ALL_TESTS macro.
+  // This method should not be called more than once on any single
+  // instance of a ParameterizedTestCaseInfoBase derived class.
+  // UnitTest has a guard to prevent this method from being called more
+  // than once.
+  virtual void RegisterTests() {
+    for (typename TestInfoContainer::iterator test_it = tests_.begin();
+         test_it != tests_.end(); ++test_it) {
+      linked_ptr<TestInfo> test_info = *test_it;
+      for (typename InstantiationContainer::iterator gen_it =
+               instantiations_.begin(); gen_it != instantiations_.end();
+               ++gen_it) {
+        const string& instantiation_name = gen_it->name;
+        ParamGenerator<ParamType> generator((*gen_it->generator)());
+        ParamNameGeneratorFunc* name_func = gen_it->name_func;
+        const char* file = gen_it->file;
+        int line = gen_it->line;
+
+        string test_case_name;
+        if (!instantiation_name.empty())
+          test_case_name = instantiation_name + "/";
+        test_case_name += test_info->test_case_base_name;
+
+        size_t i = 0;
+        std::set<std::string> test_param_names;
+        for (typename ParamGenerator<ParamType>::iterator param_it =
+                 generator.begin();
+             param_it != generator.end(); ++param_it, ++i) {
+          Message test_name_stream;
+
+          std::string param_name = name_func(
+              TestParamInfo<ParamType>(*param_it, i));
+
+          GTEST_CHECK_(IsValidParamName(param_name))
+              << "Parameterized test name '" << param_name
+              << "' is invalid, in " << file
+              << " line " << line << std::endl;
+
+          GTEST_CHECK_(test_param_names.count(param_name) == 0)
+              << "Duplicate parameterized test name '" << param_name
+              << "', in " << file << " line " << line << std::endl;
+
+          test_param_names.insert(param_name);
+
+          test_name_stream << test_info->test_base_name << "/" << param_name;
+          MakeAndRegisterTestInfo(
+              test_case_name.c_str(),
+              test_name_stream.GetString().c_str(),
+              NULL,  // No type parameter.
+              PrintToString(*param_it).c_str(),
+              code_location_,
+              GetTestCaseTypeId(),
+              TestCase::SetUpTestCase,
+              TestCase::TearDownTestCase,
+              test_info->test_meta_factory->CreateTestFactory(*param_it));
+        }  // for param_it
+      }  // for gen_it
+    }  // for test_it
+  }  // RegisterTests
+
+ private:
+  // LocalTestInfo structure keeps information about a single test registered
+  // with TEST_P macro.
+  struct TestInfo {
+    TestInfo(const char* a_test_case_base_name,
+             const char* a_test_base_name,
+             TestMetaFactoryBase<ParamType>* a_test_meta_factory) :
+        test_case_base_name(a_test_case_base_name),
+        test_base_name(a_test_base_name),
+        test_meta_factory(a_test_meta_factory) {}
+
+    const string test_case_base_name;
+    const string test_base_name;
+    const scoped_ptr<TestMetaFactoryBase<ParamType> > test_meta_factory;
+  };
+  typedef ::std::vector<linked_ptr<TestInfo> > TestInfoContainer;
+  // Records data received from INSTANTIATE_TEST_CASE_P macros:
+  //  <Instantiation name, Sequence generator creation function,
+  //     Name generator function, Source file, Source line>
+  struct InstantiationInfo {
+      InstantiationInfo(const std::string &name_in,
+                        GeneratorCreationFunc* generator_in,
+                        ParamNameGeneratorFunc* name_func_in,
+                        const char* file_in,
+                        int line_in)
+          : name(name_in),
+            generator(generator_in),
+            name_func(name_func_in),
+            file(file_in),
+            line(line_in) {}
+
+      std::string name;
+      GeneratorCreationFunc* generator;
+      ParamNameGeneratorFunc* name_func;
+      const char* file;
+      int line;
+  };
+  typedef ::std::vector<InstantiationInfo> InstantiationContainer;
+
+  static bool IsValidParamName(const std::string& name) {
+    // Check for empty string
+    if (name.empty())
+      return false;
+
+    // Check for invalid characters
+    for (std::string::size_type index = 0; index < name.size(); ++index) {
+      if (!isalnum(name[index]) && name[index] != '_')
+        return false;
+    }
+
+    return true;
+  }
+
+  const string test_case_name_;
+  CodeLocation code_location_;
+  TestInfoContainer tests_;
+  InstantiationContainer instantiations_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseInfo);
+};  // class ParameterizedTestCaseInfo
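+
+// For illustration, a rough sketch of how the user-facing macros feed this
+// class (FooTest, DoBar and SequenceA are the assumed example names from the
+// comments above):
+//
+//   TEST_P(FooTest, DoBar) { ... }
+//   // ... expands to an AddTestPattern("FooTest", "DoBar", ...) call.
+//
+//   INSTANTIATE_TEST_CASE_P(SequenceA, FooTest, ::testing::Values(1, 2));
+//   // ... expands to an AddTestCaseInstantiation("SequenceA", ...) call.
+//
+//   // RegisterTests() then registers SequenceA/FooTest.DoBar/0 and
+//   // SequenceA/FooTest.DoBar/1 with the default parameter-index names.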
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// ParameterizedTestCaseRegistry contains a map of ParameterizedTestCaseInfoBase
+// classes accessed by test case names. TEST_P and INSTANTIATE_TEST_CASE_P
+// macros use it to locate their corresponding ParameterizedTestCaseInfo
+// descriptors.
+class ParameterizedTestCaseRegistry {
+ public:
+  ParameterizedTestCaseRegistry() {}
+  ~ParameterizedTestCaseRegistry() {
+    for (TestCaseInfoContainer::iterator it = test_case_infos_.begin();
+         it != test_case_infos_.end(); ++it) {
+      delete *it;
+    }
+  }
+
+  // Looks up or creates and returns a structure containing information about
+  // tests and instantiations of a particular test case.
+  template <class TestCase>
+  ParameterizedTestCaseInfo<TestCase>* GetTestCasePatternHolder(
+      const char* test_case_name,
+      CodeLocation code_location) {
+    ParameterizedTestCaseInfo<TestCase>* typed_test_info = NULL;
+    for (TestCaseInfoContainer::iterator it = test_case_infos_.begin();
+         it != test_case_infos_.end(); ++it) {
+      if ((*it)->GetTestCaseName() == test_case_name) {
+        if ((*it)->GetTestCaseTypeId() != GetTypeId<TestCase>()) {
+          // Complain about incorrect usage of Google Test facilities
+          // and terminate the program since we cannot guarantee correct
+          // test case setup and tear-down in this case.
+          ReportInvalidTestCaseType(test_case_name, code_location);
+          posix::Abort();
+        } else {
+          // At this point we are sure that the object we found is of the same
+          // type we are looking for, so we downcast it to that type
+          // without further checks.
+          typed_test_info = CheckedDowncastToActualType<
+              ParameterizedTestCaseInfo<TestCase> >(*it);
+        }
+        break;
+      }
+    }
+    if (typed_test_info == NULL) {
+      typed_test_info = new ParameterizedTestCaseInfo<TestCase>(
+          test_case_name, code_location);
+      test_case_infos_.push_back(typed_test_info);
+    }
+    return typed_test_info;
+  }
+  void RegisterTests() {
+    for (TestCaseInfoContainer::iterator it = test_case_infos_.begin();
+         it != test_case_infos_.end(); ++it) {
+      (*it)->RegisterTests();
+    }
+  }
+
+ private:
+  typedef ::std::vector<ParameterizedTestCaseInfoBase*> TestCaseInfoContainer;
+
+  TestCaseInfoContainer test_case_infos_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseRegistry);
+};
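+
+// For illustration, a rough sketch of how the TEST_P and
+// INSTANTIATE_TEST_CASE_P macros reach this registry (the accessor path
+// through UnitTestImpl is abbreviated here):
+//
+//   GetUnitTestImpl()->parameterized_test_registry()
+//       .GetTestCasePatternHolder<FooTest>(
+//           "FooTest", CodeLocation(__FILE__, __LINE__));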
+
+}  // namespace internal
+}  // namespace testing
+
+#endif  //  GTEST_HAS_PARAM_TEST
+
+#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_
+// This file was GENERATED by command:
+//     pump.py gtest-param-util-generated.h.pump
+// DO NOT EDIT BY HAND!!!
+
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vladl@google.com (Vlad Losev)
+
+// Type and function utilities for implementing parameterized tests.
+// This file is generated by a SCRIPT.  DO NOT EDIT BY HAND!
+//
+// Currently Google Test supports at most 50 arguments in Values,
+// and at most 10 arguments in Combine. Please contact
+// googletestframework@googlegroups.com if you need more.
+// Please note that the number of arguments to Combine is limited
+// by the maximum arity of the implementation of tuple which is
+// currently set at 10.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_
+
+// scripts/fuse_gtest.py depends on gtest's own header being #included
+// *unconditionally*.  Therefore these #includes cannot be moved
+// inside #if GTEST_HAS_PARAM_TEST.
+
+#if GTEST_HAS_PARAM_TEST
+
+namespace testing {
+
+// Forward declarations of ValuesIn(), which is implemented in
+// include/gtest/gtest-param-test.h.
+template <typename ForwardIterator>
+internal::ParamGenerator<
+  typename ::testing::internal::IteratorTraits<ForwardIterator>::value_type>
+ValuesIn(ForwardIterator begin, ForwardIterator end);
+
+template <typename T, size_t N>
+internal::ParamGenerator<T> ValuesIn(const T (&array)[N]);
+
+template <class Container>
+internal::ParamGenerator<typename Container::value_type> ValuesIn(
+    const Container& container);
+
+namespace internal {
+
+// Used in the Values() function to provide polymorphic capabilities.
+template <typename T1>
+class ValueArray1 {
+ public:
+  explicit ValueArray1(T1 v1) : v1_(v1) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray1& other);
+
+  const T1 v1_;
+};
+
+template <typename T1, typename T2>
+class ValueArray2 {
+ public:
+  ValueArray2(T1 v1, T2 v2) : v1_(v1), v2_(v2) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray2& other);
+
+  const T1 v1_;
+  const T2 v2_;
+};
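+
+// For illustration, a sketch of the conversion these classes provide.
+// Values(1, 5) (declared in gtest-param-test.h) returns a
+// ValueArray2<int, int>; when the test's parameter type is double, the
+// conversion operator above static_casts each stored value:
+//
+//   ParamGenerator<double> gen = ValueArray2<int, int>(1, 5);
+//   // gen produces 1.0 and 5.0.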
+
+template <typename T1, typename T2, typename T3>
+class ValueArray3 {
+ public:
+  ValueArray3(T1 v1, T2 v2, T3 v3) : v1_(v1), v2_(v2), v3_(v3) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray3& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4>
+class ValueArray4 {
+ public:
+  ValueArray4(T1 v1, T2 v2, T3 v3, T4 v4) : v1_(v1), v2_(v2), v3_(v3),
+      v4_(v4) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray4& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+class ValueArray5 {
+ public:
+  ValueArray5(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5) : v1_(v1), v2_(v2), v3_(v3),
+      v4_(v4), v5_(v5) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray5& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6>
+class ValueArray6 {
+ public:
+  ValueArray6(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6) : v1_(v1), v2_(v2),
+      v3_(v3), v4_(v4), v5_(v5), v6_(v6) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray6& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7>
+class ValueArray7 {
+ public:
+  ValueArray7(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7) : v1_(v1),
+      v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray7& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8>
+class ValueArray8 {
+ public:
+  ValueArray8(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
+      T8 v8) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+      v8_(v8) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray8& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9>
+class ValueArray9 {
+ public:
+  ValueArray9(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8,
+      T9 v9) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+      v8_(v8), v9_(v9) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray9& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10>
+class ValueArray10 {
+ public:
+  ValueArray10(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+      v8_(v8), v9_(v9), v10_(v10) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray10& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11>
+class ValueArray11 {
+ public:
+  ValueArray11(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),
+      v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray11& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12>
+class ValueArray12 {
+ public:
+  ValueArray12(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),
+      v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray12& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13>
+class ValueArray13 {
+ public:
+  ValueArray13(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),
+      v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),
+      v12_(v12), v13_(v13) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray13& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14>
+class ValueArray14 {
+ public:
+  ValueArray14(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14) : v1_(v1), v2_(v2), v3_(v3),
+      v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+      v11_(v11), v12_(v12), v13_(v13), v14_(v14) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray14& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15>
+class ValueArray15 {
+ public:
+  ValueArray15(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15) : v1_(v1), v2_(v2),
+      v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+      v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray15& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16>
+class ValueArray16 {
+ public:
+  ValueArray16(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16) : v1_(v1),
+      v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),
+      v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),
+      v16_(v16) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray16& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17>
+class ValueArray17 {
+ public:
+  ValueArray17(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16,
+      T17 v17) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+      v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+      v15_(v15), v16_(v16), v17_(v17) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray17& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18>
+class ValueArray18 {
+ public:
+  ValueArray18(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+      v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+      v15_(v15), v16_(v16), v17_(v17), v18_(v18) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray18& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19>
+class ValueArray19 {
+ public:
+  ValueArray19(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),
+      v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13),
+      v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray19& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20>
+class ValueArray20 {
+ public:
+  ValueArray20(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),
+      v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12),
+      v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18),
+      v19_(v19), v20_(v20) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray20& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21>
+class ValueArray21 {
+ public:
+  ValueArray21(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),
+      v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),
+      v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17),
+      v18_(v18), v19_(v19), v20_(v20), v21_(v21) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray21& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22>
+class ValueArray22 {
+ public:
+  ValueArray22(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22) : v1_(v1), v2_(v2), v3_(v3),
+      v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+      v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+      v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray22& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23>
+class ValueArray23 {
+ public:
+  ValueArray23(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23) : v1_(v1), v2_(v2),
+      v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+      v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+      v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
+      v23_(v23) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray23& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24>
+class ValueArray24 {
+ public:
+  ValueArray24(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24) : v1_(v1),
+      v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),
+      v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),
+      v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21),
+      v22_(v22), v23_(v23), v24_(v24) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray24& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25>
+class ValueArray25 {
+ public:
+  ValueArray25(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24,
+      T25 v25) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+      v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+      v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+      v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray25& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26>
+class ValueArray26 {
+ public:
+  ValueArray26(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+      T26 v26) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+      v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+      v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+      v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray26& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+  const T26 v26_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27>
+class ValueArray27 {
+ public:
+  ValueArray27(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+      T26 v26, T27 v27) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),
+      v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13),
+      v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19),
+      v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25),
+      v26_(v26), v27_(v27) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray27& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+  const T26 v26_;
+  const T27 v27_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28>
+class ValueArray28 {
+ public:
+  ValueArray28(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+      T26 v26, T27 v27, T28 v28) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),
+      v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12),
+      v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18),
+      v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24),
+      v25_(v25), v26_(v26), v27_(v27), v28_(v28) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray28& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+  const T26 v26_;
+  const T27 v27_;
+  const T28 v28_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29>
+class ValueArray29 {
+ public:
+  ValueArray29(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+      T26 v26, T27 v27, T28 v28, T29 v29) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),
+      v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),
+      v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17),
+      v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23),
+      v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray29& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+  const T26 v26_;
+  const T27 v27_;
+  const T28 v28_;
+  const T29 v29_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30>
+class ValueArray30 {
+ public:
+  ValueArray30(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30) : v1_(v1), v2_(v2), v3_(v3),
+      v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+      v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+      v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
+      v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
+      v29_(v29), v30_(v30) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray30& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+  const T26 v26_;
+  const T27 v27_;
+  const T28 v28_;
+  const T29 v29_;
+  const T30 v30_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31>
+class ValueArray31 {
+ public:
+  ValueArray31(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31) : v1_(v1), v2_(v2),
+      v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+      v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+      v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
+      v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
+      v29_(v29), v30_(v30), v31_(v31) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray31& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+  const T26 v26_;
+  const T27 v27_;
+  const T28 v28_;
+  const T29 v29_;
+  const T30 v30_;
+  const T31 v31_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32>
+class ValueArray32 {
+ public:
+  ValueArray32(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32) : v1_(v1),
+      v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),
+      v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),
+      v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21),
+      v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27),
+      v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray32& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+  const T26 v26_;
+  const T27 v27_;
+  const T28 v28_;
+  const T29 v29_;
+  const T30 v30_;
+  const T31 v31_;
+  const T32 v32_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33>
+class ValueArray33 {
+ public:
+  ValueArray33(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32,
+      T33 v33) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+      v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+      v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+      v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
+      v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
+      v33_(v33) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray33& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+  const T26 v26_;
+  const T27 v27_;
+  const T28 v28_;
+  const T29 v29_;
+  const T30 v30_;
+  const T31 v31_;
+  const T32 v32_;
+  const T33 v33_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34>
+class ValueArray34 {
+ public:
+  ValueArray34(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+      T34 v34) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+      v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+      v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+      v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
+      v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
+      v33_(v33), v34_(v34) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray34& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+  const T26 v26_;
+  const T27 v27_;
+  const T28 v28_;
+  const T29 v29_;
+  const T30 v30_;
+  const T31 v31_;
+  const T32 v32_;
+  const T33 v33_;
+  const T34 v34_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35>
+class ValueArray35 {
+ public:
+  ValueArray35(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+      T34 v34, T35 v35) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),
+      v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13),
+      v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19),
+      v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25),
+      v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31),
+      v32_(v32), v33_(v33), v34_(v34), v35_(v35) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray35& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+  const T26 v26_;
+  const T27 v27_;
+  const T28 v28_;
+  const T29 v29_;
+  const T30 v30_;
+  const T31 v31_;
+  const T32 v32_;
+  const T33 v33_;
+  const T34 v34_;
+  const T35 v35_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36>
+class ValueArray36 {
+ public:
+  ValueArray36(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+      T34 v34, T35 v35, T36 v36) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),
+      v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12),
+      v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18),
+      v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24),
+      v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30),
+      v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray36& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+  const T26 v26_;
+  const T27 v27_;
+  const T28 v28_;
+  const T29 v29_;
+  const T30 v30_;
+  const T31 v31_;
+  const T32 v32_;
+  const T33 v33_;
+  const T34 v34_;
+  const T35 v35_;
+  const T36 v36_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37>
+class ValueArray37 {
+ public:
+  ValueArray37(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+      T34 v34, T35 v35, T36 v36, T37 v37) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),
+      v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),
+      v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17),
+      v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23),
+      v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29),
+      v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35),
+      v36_(v36), v37_(v37) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_), static_cast<T>(v37_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray37& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+  const T26 v26_;
+  const T27 v27_;
+  const T28 v28_;
+  const T29 v29_;
+  const T30 v30_;
+  const T31 v31_;
+  const T32 v32_;
+  const T33 v33_;
+  const T34 v34_;
+  const T35 v35_;
+  const T36 v36_;
+  const T37 v37_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38>
+class ValueArray38 {
+ public:
+  ValueArray38(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38) : v1_(v1), v2_(v2), v3_(v3),
+      v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+      v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+      v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
+      v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
+      v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34),
+      v35_(v35), v36_(v36), v37_(v37), v38_(v38) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray38& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+  const T26 v26_;
+  const T27 v27_;
+  const T28 v28_;
+  const T29 v29_;
+  const T30 v30_;
+  const T31 v31_;
+  const T32 v32_;
+  const T33 v33_;
+  const T34 v34_;
+  const T35 v35_;
+  const T36 v36_;
+  const T37 v37_;
+  const T38 v38_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39>
+class ValueArray39 {
+ public:
+  ValueArray39(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39) : v1_(v1), v2_(v2),
+      v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+      v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+      v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
+      v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
+      v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34),
+      v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+        static_cast<T>(v39_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray39& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+  const T26 v26_;
+  const T27 v27_;
+  const T28 v28_;
+  const T29 v29_;
+  const T30 v30_;
+  const T31 v31_;
+  const T32 v32_;
+  const T33 v33_;
+  const T34 v34_;
+  const T35 v35_;
+  const T36 v36_;
+  const T37 v37_;
+  const T38 v38_;
+  const T39 v39_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40>
+class ValueArray40 {
+ public:
+  ValueArray40(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40) : v1_(v1),
+      v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),
+      v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),
+      v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21),
+      v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27),
+      v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33),
+      v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39),
+      v40_(v40) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+        static_cast<T>(v39_), static_cast<T>(v40_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray40& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+  const T26 v26_;
+  const T27 v27_;
+  const T28 v28_;
+  const T29 v29_;
+  const T30 v30_;
+  const T31 v31_;
+  const T32 v32_;
+  const T33 v33_;
+  const T34 v34_;
+  const T35 v35_;
+  const T36 v36_;
+  const T37 v37_;
+  const T38 v38_;
+  const T39 v39_;
+  const T40 v40_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41>
+class ValueArray41 {
+ public:
+  ValueArray41(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40,
+      T41 v41) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+      v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+      v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+      v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
+      v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
+      v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38),
+      v39_(v39), v40_(v40), v41_(v41) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray41& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+  const T26 v26_;
+  const T27 v27_;
+  const T28 v28_;
+  const T29 v29_;
+  const T30 v30_;
+  const T31 v31_;
+  const T32 v32_;
+  const T33 v33_;
+  const T34 v34_;
+  const T35 v35_;
+  const T36 v36_;
+  const T37 v37_;
+  const T38 v38_;
+  const T39 v39_;
+  const T40 v40_;
+  const T41 v41_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42>
+class ValueArray42 {
+ public:
+  ValueArray42(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+      T42 v42) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+      v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+      v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+      v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
+      v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
+      v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38),
+      v39_(v39), v40_(v40), v41_(v41), v42_(v42) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
+        static_cast<T>(v42_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray42& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+  const T26 v26_;
+  const T27 v27_;
+  const T28 v28_;
+  const T29 v29_;
+  const T30 v30_;
+  const T31 v31_;
+  const T32 v32_;
+  const T33 v33_;
+  const T34 v34_;
+  const T35 v35_;
+  const T36 v36_;
+  const T37 v37_;
+  const T38 v38_;
+  const T39 v39_;
+  const T40 v40_;
+  const T41 v41_;
+  const T42 v42_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42, typename T43>
+class ValueArray43 {
+ public:
+  ValueArray43(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+      T42 v42, T43 v43) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),
+      v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13),
+      v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19),
+      v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25),
+      v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31),
+      v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37),
+      v38_(v38), v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
+        static_cast<T>(v42_), static_cast<T>(v43_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray43& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+  const T26 v26_;
+  const T27 v27_;
+  const T28 v28_;
+  const T29 v29_;
+  const T30 v30_;
+  const T31 v31_;
+  const T32 v32_;
+  const T33 v33_;
+  const T34 v34_;
+  const T35 v35_;
+  const T36 v36_;
+  const T37 v37_;
+  const T38 v38_;
+  const T39 v39_;
+  const T40 v40_;
+  const T41 v41_;
+  const T42 v42_;
+  const T43 v43_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42, typename T43, typename T44>
+class ValueArray44 {
+ public:
+  ValueArray44(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+      T42 v42, T43 v43, T44 v44) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),
+      v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12),
+      v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18),
+      v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24),
+      v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30),
+      v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36),
+      v37_(v37), v38_(v38), v39_(v39), v40_(v40), v41_(v41), v42_(v42),
+      v43_(v43), v44_(v44) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
+        static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray44& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+  const T26 v26_;
+  const T27 v27_;
+  const T28 v28_;
+  const T29 v29_;
+  const T30 v30_;
+  const T31 v31_;
+  const T32 v32_;
+  const T33 v33_;
+  const T34 v34_;
+  const T35 v35_;
+  const T36 v36_;
+  const T37 v37_;
+  const T38 v38_;
+  const T39 v39_;
+  const T40 v40_;
+  const T41 v41_;
+  const T42 v42_;
+  const T43 v43_;
+  const T44 v44_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42, typename T43, typename T44, typename T45>
+class ValueArray45 {
+ public:
+  ValueArray45(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+      T42 v42, T43 v43, T44 v44, T45 v45) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),
+      v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),
+      v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17),
+      v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23),
+      v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29),
+      v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35),
+      v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40), v41_(v41),
+      v42_(v42), v43_(v43), v44_(v44), v45_(v45) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
+        static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_),
+        static_cast<T>(v45_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray45& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+  const T26 v26_;
+  const T27 v27_;
+  const T28 v28_;
+  const T29 v29_;
+  const T30 v30_;
+  const T31 v31_;
+  const T32 v32_;
+  const T33 v33_;
+  const T34 v34_;
+  const T35 v35_;
+  const T36 v36_;
+  const T37 v37_;
+  const T38 v38_;
+  const T39 v39_;
+  const T40 v40_;
+  const T41 v41_;
+  const T42 v42_;
+  const T43 v43_;
+  const T44 v44_;
+  const T45 v45_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42, typename T43, typename T44, typename T45,
+    typename T46>
+class ValueArray46 {
+ public:
+  ValueArray46(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+      T42 v42, T43 v43, T44 v44, T45 v45, T46 v46) : v1_(v1), v2_(v2), v3_(v3),
+      v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+      v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+      v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
+      v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
+      v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34),
+      v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40),
+      v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45), v46_(v46) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
+        static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_),
+        static_cast<T>(v45_), static_cast<T>(v46_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray46& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+  const T26 v26_;
+  const T27 v27_;
+  const T28 v28_;
+  const T29 v29_;
+  const T30 v30_;
+  const T31 v31_;
+  const T32 v32_;
+  const T33 v33_;
+  const T34 v34_;
+  const T35 v35_;
+  const T36 v36_;
+  const T37 v37_;
+  const T38 v38_;
+  const T39 v39_;
+  const T40 v40_;
+  const T41 v41_;
+  const T42 v42_;
+  const T43 v43_;
+  const T44 v44_;
+  const T45 v45_;
+  const T46 v46_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42, typename T43, typename T44, typename T45,
+    typename T46, typename T47>
+class ValueArray47 {
+ public:
+  ValueArray47(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+      T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47) : v1_(v1), v2_(v2),
+      v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+      v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+      v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
+      v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
+      v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34),
+      v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40),
+      v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45), v46_(v46),
+      v47_(v47) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
+        static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_),
+        static_cast<T>(v45_), static_cast<T>(v46_), static_cast<T>(v47_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray47& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+  const T26 v26_;
+  const T27 v27_;
+  const T28 v28_;
+  const T29 v29_;
+  const T30 v30_;
+  const T31 v31_;
+  const T32 v32_;
+  const T33 v33_;
+  const T34 v34_;
+  const T35 v35_;
+  const T36 v36_;
+  const T37 v37_;
+  const T38 v38_;
+  const T39 v39_;
+  const T40 v40_;
+  const T41 v41_;
+  const T42 v42_;
+  const T43 v43_;
+  const T44 v44_;
+  const T45 v45_;
+  const T46 v46_;
+  const T47 v47_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42, typename T43, typename T44, typename T45,
+    typename T46, typename T47, typename T48>
+class ValueArray48 {
+ public:
+  ValueArray48(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+      T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48) : v1_(v1),
+      v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),
+      v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),
+      v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21),
+      v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27),
+      v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33),
+      v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39),
+      v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45),
+      v46_(v46), v47_(v47), v48_(v48) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
+        static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_),
+        static_cast<T>(v45_), static_cast<T>(v46_), static_cast<T>(v47_),
+        static_cast<T>(v48_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray48& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+  const T26 v26_;
+  const T27 v27_;
+  const T28 v28_;
+  const T29 v29_;
+  const T30 v30_;
+  const T31 v31_;
+  const T32 v32_;
+  const T33 v33_;
+  const T34 v34_;
+  const T35 v35_;
+  const T36 v36_;
+  const T37 v37_;
+  const T38 v38_;
+  const T39 v39_;
+  const T40 v40_;
+  const T41 v41_;
+  const T42 v42_;
+  const T43 v43_;
+  const T44 v44_;
+  const T45 v45_;
+  const T46 v46_;
+  const T47 v47_;
+  const T48 v48_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42, typename T43, typename T44, typename T45,
+    typename T46, typename T47, typename T48, typename T49>
+class ValueArray49 {
+ public:
+  ValueArray49(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+      T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48,
+      T49 v49) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+      v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+      v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+      v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
+      v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
+      v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38),
+      v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44),
+      v45_(v45), v46_(v46), v47_(v47), v48_(v48), v49_(v49) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
+        static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_),
+        static_cast<T>(v45_), static_cast<T>(v46_), static_cast<T>(v47_),
+        static_cast<T>(v48_), static_cast<T>(v49_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray49& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+  const T26 v26_;
+  const T27 v27_;
+  const T28 v28_;
+  const T29 v29_;
+  const T30 v30_;
+  const T31 v31_;
+  const T32 v32_;
+  const T33 v33_;
+  const T34 v34_;
+  const T35 v35_;
+  const T36 v36_;
+  const T37 v37_;
+  const T38 v38_;
+  const T39 v39_;
+  const T40 v40_;
+  const T41 v41_;
+  const T42 v42_;
+  const T43 v43_;
+  const T44 v44_;
+  const T45 v45_;
+  const T46 v46_;
+  const T47 v47_;
+  const T48 v48_;
+  const T49 v49_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42, typename T43, typename T44, typename T45,
+    typename T46, typename T47, typename T48, typename T49, typename T50>
+class ValueArray50 {
+ public:
+  ValueArray50(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+      T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48, T49 v49,
+      T50 v50) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+      v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+      v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+      v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
+      v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
+      v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38),
+      v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44),
+      v45_(v45), v46_(v46), v47_(v47), v48_(v48), v49_(v49), v50_(v50) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
+        static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_),
+        static_cast<T>(v45_), static_cast<T>(v46_), static_cast<T>(v47_),
+        static_cast<T>(v48_), static_cast<T>(v49_), static_cast<T>(v50_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray50& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+  const T26 v26_;
+  const T27 v27_;
+  const T28 v28_;
+  const T29 v29_;
+  const T30 v30_;
+  const T31 v31_;
+  const T32 v32_;
+  const T33 v33_;
+  const T34 v34_;
+  const T35 v35_;
+  const T36 v36_;
+  const T37 v37_;
+  const T38 v38_;
+  const T39 v39_;
+  const T40 v40_;
+  const T41 v41_;
+  const T42 v42_;
+  const T43 v43_;
+  const T44 v44_;
+  const T45 v45_;
+  const T46 v46_;
+  const T47 v47_;
+  const T48 v48_;
+  const T49 v49_;
+  const T50 v50_;
+};
+
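The ValueArrayN classes above back the public ::testing::Values(...) helper: Values(v1, ..., vN) returns a ValueArrayN object, and its templated conversion to ParamGenerator<T> (static_cast every stored value, then ValuesIn over the resulting array) is what lets one Values() call parameterize fixtures with different parameter types. A minimal usage sketch, assuming the TEST_P / INSTANTIATE_TEST_CASE_P macros of this googletest vintage; BlockSizeTest and its values are illustrative only:

#include "gtest/gtest.h"

class BlockSizeTest : public ::testing::TestWithParam<int> {};

// GetParam() returns one of the values passed to Values() below.
TEST_P(BlockSizeTest, IsPositive) { EXPECT_GT(GetParam(), 0); }

// Values(2, 4, 8) builds a ValueArray3<int, int, int>; the macro converts it
// to a ParamGenerator<int> through the operator ParamGenerator<T>() above.
INSTANTIATE_TEST_CASE_P(PowersOfTwo, BlockSizeTest, ::testing::Values(2, 4, 8));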
+# if GTEST_HAS_COMBINE
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Generates values from the Cartesian product of values produced
+// by the argument generators.
+//
+template <typename T1, typename T2>
+class CartesianProductGenerator2
+    : public ParamGeneratorInterface< ::testing::tuple<T1, T2> > {
+ public:
+  typedef ::testing::tuple<T1, T2> ParamType;
+
+  CartesianProductGenerator2(const ParamGenerator<T1>& g1,
+      const ParamGenerator<T2>& g2)
+      : g1_(g1), g2_(g2) {}
+  virtual ~CartesianProductGenerator2() {}
+
+  virtual ParamIteratorInterface<ParamType>* Begin() const {
+    return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin());
+  }
+  virtual ParamIteratorInterface<ParamType>* End() const {
+    return new Iterator(this, g1_, g1_.end(), g2_, g2_.end());
+  }
+
+ private:
+  class Iterator : public ParamIteratorInterface<ParamType> {
+   public:
+    Iterator(const ParamGeneratorInterface<ParamType>* base,
+      const ParamGenerator<T1>& g1,
+      const typename ParamGenerator<T1>::iterator& current1,
+      const ParamGenerator<T2>& g2,
+      const typename ParamGenerator<T2>::iterator& current2)
+        : base_(base),
+          begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+          begin2_(g2.begin()), end2_(g2.end()), current2_(current2)    {
+      ComputeCurrentValue();
+    }
+    virtual ~Iterator() {}
+
+    virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+      return base_;
+    }
+    // Advance() must not be called on an iterator that is already past the
+    // end of its range, so no component iterator may be past the end of its
+    // range either.
+    virtual void Advance() {
+      assert(!AtEnd());
+      ++current2_;
+      if (current2_ == end2_) {
+        current2_ = begin2_;
+        ++current1_;
+      }
+      ComputeCurrentValue();
+    }
+    virtual ParamIteratorInterface<ParamType>* Clone() const {
+      return new Iterator(*this);
+    }
+    virtual const ParamType* Current() const { return &current_value_; }
+    virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+      // Having the same base generator guarantees that the other
+      // iterator is of the same type and we can downcast.
+      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+          << "The program attempted to compare iterators "
+          << "from different generators." << std::endl;
+      const Iterator* typed_other =
+          CheckedDowncastToActualType<const Iterator>(&other);
+      // We must report iterators equal if they both point beyond their
+      // respective ranges. That can happen in a variety of fashions,
+      // so we have to consult AtEnd().
+      return (AtEnd() && typed_other->AtEnd()) ||
+         (
+          current1_ == typed_other->current1_ &&
+          current2_ == typed_other->current2_);
+    }
+
+   private:
+    Iterator(const Iterator& other)
+        : base_(other.base_),
+        begin1_(other.begin1_),
+        end1_(other.end1_),
+        current1_(other.current1_),
+        begin2_(other.begin2_),
+        end2_(other.end2_),
+        current2_(other.current2_) {
+      ComputeCurrentValue();
+    }
+
+    void ComputeCurrentValue() {
+      if (!AtEnd())
+        current_value_ = ParamType(*current1_, *current2_);
+    }
+    bool AtEnd() const {
+      // We must report the iterator as past the end of the range when any of
+      // the component iterators has reached the end of its range.
+      return
+          current1_ == end1_ ||
+          current2_ == end2_;
+    }
+
+    // No implementation - assignment is unsupported.
+    void operator=(const Iterator& other);
+
+    const ParamGeneratorInterface<ParamType>* const base_;
+    // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+    // current[i]_ is the actual traversing iterator.
+    const typename ParamGenerator<T1>::iterator begin1_;
+    const typename ParamGenerator<T1>::iterator end1_;
+    typename ParamGenerator<T1>::iterator current1_;
+    const typename ParamGenerator<T2>::iterator begin2_;
+    const typename ParamGenerator<T2>::iterator end2_;
+    typename ParamGenerator<T2>::iterator current2_;
+    ParamType current_value_;
+  };  // class CartesianProductGenerator2::Iterator
+
+  // No implementation - assignment is unsupported.
+  void operator=(const CartesianProductGenerator2& other);
+
+  const ParamGenerator<T1> g1_;
+  const ParamGenerator<T2> g2_;
+};  // class CartesianProductGenerator2
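CartesianProductGenerator2 is the machinery behind the public ::testing::Combine() helper when it is handed two generators: Begin()/End() produce the nested Iterator, and each test instance receives one ::testing::tuple<T1, T2>. A minimal sketch of how it is normally reached, assuming the TEST_P / INSTANTIATE_TEST_CASE_P macros and ::testing::get from this googletest vintage; ScaleShiftTest and its parameter values are illustrative only:

#include "gtest/gtest.h"

class ScaleShiftTest
    : public ::testing::TestWithParam< ::testing::tuple<int, double> > {};

TEST_P(ScaleShiftTest, ShiftIncreasesValue) {
  const int scale = ::testing::get<0>(GetParam());
  const double shift = ::testing::get<1>(GetParam());
  EXPECT_GT(scale + shift, static_cast<double>(scale));
}

// Combine() wraps the two ParamGenerators in a CartesianProductGenerator2,
// so the test body runs once per (scale, shift) pair: 3 * 2 = 6 instances.
INSTANTIATE_TEST_CASE_P(
    AllPairs, ScaleShiftTest,
    ::testing::Combine(::testing::Values(1, 2, 4),
                       ::testing::Values(0.5, 1.5)));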
+
+
+template <typename T1, typename T2, typename T3>
+class CartesianProductGenerator3
+    : public ParamGeneratorInterface< ::testing::tuple<T1, T2, T3> > {
+ public:
+  typedef ::testing::tuple<T1, T2, T3> ParamType;
+
+  CartesianProductGenerator3(const ParamGenerator<T1>& g1,
+      const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3)
+      : g1_(g1), g2_(g2), g3_(g3) {}
+  virtual ~CartesianProductGenerator3() {}
+
+  virtual ParamIteratorInterface<ParamType>* Begin() const {
+    return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+        g3_.begin());
+  }
+  virtual ParamIteratorInterface<ParamType>* End() const {
+    return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end());
+  }
+
+ private:
+  class Iterator : public ParamIteratorInterface<ParamType> {
+   public:
+    Iterator(const ParamGeneratorInterface<ParamType>* base,
+      const ParamGenerator<T1>& g1,
+      const typename ParamGenerator<T1>::iterator& current1,
+      const ParamGenerator<T2>& g2,
+      const typename ParamGenerator<T2>::iterator& current2,
+      const ParamGenerator<T3>& g3,
+      const typename ParamGenerator<T3>::iterator& current3)
+        : base_(base),
+          begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+          begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+          begin3_(g3.begin()), end3_(g3.end()), current3_(current3)    {
+      ComputeCurrentValue();
+    }
+    virtual ~Iterator() {}
+
+    virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+      return base_;
+    }
+    // Advance() must not be called on an iterator that is already past the
+    // end of its range, so no component iterator may be past the end of its
+    // range either.
+    virtual void Advance() {
+      assert(!AtEnd());
+      ++current3_;
+      if (current3_ == end3_) {
+        current3_ = begin3_;
+        ++current2_;
+      }
+      if (current2_ == end2_) {
+        current2_ = begin2_;
+        ++current1_;
+      }
+      ComputeCurrentValue();
+    }
+    virtual ParamIteratorInterface<ParamType>* Clone() const {
+      return new Iterator(*this);
+    }
+    virtual const ParamType* Current() const { return &current_value_; }
+    virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+      // Having the same base generator guarantees that the other
+      // iterator is of the same type and we can downcast.
+      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+          << "The program attempted to compare iterators "
+          << "from different generators." << std::endl;
+      const Iterator* typed_other =
+          CheckedDowncastToActualType<const Iterator>(&other);
+      // We must report iterators equal if they both point beyond their
+      // respective ranges. That can happen in a variety of fashions,
+      // so we have to consult AtEnd().
+      return (AtEnd() && typed_other->AtEnd()) ||
+         (
+          current1_ == typed_other->current1_ &&
+          current2_ == typed_other->current2_ &&
+          current3_ == typed_other->current3_);
+    }
+
+   private:
+    Iterator(const Iterator& other)
+        : base_(other.base_),
+        begin1_(other.begin1_),
+        end1_(other.end1_),
+        current1_(other.current1_),
+        begin2_(other.begin2_),
+        end2_(other.end2_),
+        current2_(other.current2_),
+        begin3_(other.begin3_),
+        end3_(other.end3_),
+        current3_(other.current3_) {
+      ComputeCurrentValue();
+    }
+
+    void ComputeCurrentValue() {
+      if (!AtEnd())
+        current_value_ = ParamType(*current1_, *current2_, *current3_);
+    }
+    bool AtEnd() const {
+      // We must report the iterator as past the end of the range when any of
+      // the component iterators has reached the end of its range.
+      return
+          current1_ == end1_ ||
+          current2_ == end2_ ||
+          current3_ == end3_;
+    }
+
+    // No implementation - assignment is unsupported.
+    void operator=(const Iterator& other);
+
+    const ParamGeneratorInterface<ParamType>* const base_;
+    // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+    // current[i]_ is the actual traversing iterator.
+    const typename ParamGenerator<T1>::iterator begin1_;
+    const typename ParamGenerator<T1>::iterator end1_;
+    typename ParamGenerator<T1>::iterator current1_;
+    const typename ParamGenerator<T2>::iterator begin2_;
+    const typename ParamGenerator<T2>::iterator end2_;
+    typename ParamGenerator<T2>::iterator current2_;
+    const typename ParamGenerator<T3>::iterator begin3_;
+    const typename ParamGenerator<T3>::iterator end3_;
+    typename ParamGenerator<T3>::iterator current3_;
+    ParamType current_value_;
+  };  // class CartesianProductGenerator3::Iterator
+
+  // No implementation - assignment is unsupported.
+  void operator=(const CartesianProductGenerator3& other);
+
+  const ParamGenerator<T1> g1_;
+  const ParamGenerator<T2> g2_;
+  const ParamGenerator<T3> g3_;
+};  // class CartesianProductGenerator3
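The Advance() methods of these iterators behave like an odometer: the rightmost component iterator moves fastest and, when it wraps back to its begin, carries into the iterator on its left, so an N-way Combine() enumerates the full product of the component sizes in lexicographic order. A standalone sketch of that ordering (plain C++, no googletest dependency; the sizes are illustrative):

#include <cstdio>

int main() {
  // The indices stand in for the component iterators of a
  // CartesianProductGenerator3: i3 advances fastest, i1 slowest.
  const int n1 = 2, n2 = 2, n3 = 3;
  for (int i1 = 0; i1 < n1; ++i1)
    for (int i2 = 0; i2 < n2; ++i2)
      for (int i3 = 0; i3 < n3; ++i3)
        std::printf("(%d, %d, %d)\n", i1, i2, i3);  // 2 * 2 * 3 = 12 tuples
  return 0;
}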
+
+
+template <typename T1, typename T2, typename T3, typename T4>
+class CartesianProductGenerator4
+    : public ParamGeneratorInterface< ::testing::tuple<T1, T2, T3, T4> > {
+ public:
+  typedef ::testing::tuple<T1, T2, T3, T4> ParamType;
+
+  CartesianProductGenerator4(const ParamGenerator<T1>& g1,
+      const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
+      const ParamGenerator<T4>& g4)
+      : g1_(g1), g2_(g2), g3_(g3), g4_(g4) {}
+  virtual ~CartesianProductGenerator4() {}
+
+  virtual ParamIteratorInterface<ParamType>* Begin() const {
+    return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+        g3_.begin(), g4_, g4_.begin());
+  }
+  virtual ParamIteratorInterface<ParamType>* End() const {
+    return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
+        g4_, g4_.end());
+  }
+
+ private:
+  class Iterator : public ParamIteratorInterface<ParamType> {
+   public:
+    Iterator(const ParamGeneratorInterface<ParamType>* base,
+      const ParamGenerator<T1>& g1,
+      const typename ParamGenerator<T1>::iterator& current1,
+      const ParamGenerator<T2>& g2,
+      const typename ParamGenerator<T2>::iterator& current2,
+      const ParamGenerator<T3>& g3,
+      const typename ParamGenerator<T3>::iterator& current3,
+      const ParamGenerator<T4>& g4,
+      const typename ParamGenerator<T4>::iterator& current4)
+        : base_(base),
+          begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+          begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+          begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
+          begin4_(g4.begin()), end4_(g4.end()), current4_(current4)    {
+      ComputeCurrentValue();
+    }
+    virtual ~Iterator() {}
+
+    virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+      return base_;
+    }
+    // Advance() must not be called on an iterator that is already past the
+    // end of its range, so no component iterator may be past the end of its
+    // range either.
+    virtual void Advance() {
+      assert(!AtEnd());
+      ++current4_;
+      if (current4_ == end4_) {
+        current4_ = begin4_;
+        ++current3_;
+      }
+      if (current3_ == end3_) {
+        current3_ = begin3_;
+        ++current2_;
+      }
+      if (current2_ == end2_) {
+        current2_ = begin2_;
+        ++current1_;
+      }
+      ComputeCurrentValue();
+    }
+    virtual ParamIteratorInterface<ParamType>* Clone() const {
+      return new Iterator(*this);
+    }
+    virtual const ParamType* Current() const { return &current_value_; }
+    virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+      // Having the same base generator guarantees that the other
+      // iterator is of the same type and we can downcast.
+      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+          << "The program attempted to compare iterators "
+          << "from different generators." << std::endl;
+      const Iterator* typed_other =
+          CheckedDowncastToActualType<const Iterator>(&other);
+      // We must report iterators equal if they both point beyond their
+      // respective ranges. That can happen in a variety of fashions,
+      // so we have to consult AtEnd().
+      return (AtEnd() && typed_other->AtEnd()) ||
+         (
+          current1_ == typed_other->current1_ &&
+          current2_ == typed_other->current2_ &&
+          current3_ == typed_other->current3_ &&
+          current4_ == typed_other->current4_);
+    }
+
+   private:
+    Iterator(const Iterator& other)
+        : base_(other.base_),
+        begin1_(other.begin1_),
+        end1_(other.end1_),
+        current1_(other.current1_),
+        begin2_(other.begin2_),
+        end2_(other.end2_),
+        current2_(other.current2_),
+        begin3_(other.begin3_),
+        end3_(other.end3_),
+        current3_(other.current3_),
+        begin4_(other.begin4_),
+        end4_(other.end4_),
+        current4_(other.current4_) {
+      ComputeCurrentValue();
+    }
+
+    void ComputeCurrentValue() {
+      if (!AtEnd())
+        current_value_ = ParamType(*current1_, *current2_, *current3_,
+            *current4_);
+    }
+    bool AtEnd() const {
+      // We must report the iterator as past the end of the range when any of
+      // the component iterators has reached the end of its range.
+      return
+          current1_ == end1_ ||
+          current2_ == end2_ ||
+          current3_ == end3_ ||
+          current4_ == end4_;
+    }
+
+    // No implementation - assignment is unsupported.
+    void operator=(const Iterator& other);
+
+    const ParamGeneratorInterface<ParamType>* const base_;
+    // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+    // current[i]_ is the actual traversing iterator.
+    const typename ParamGenerator<T1>::iterator begin1_;
+    const typename ParamGenerator<T1>::iterator end1_;
+    typename ParamGenerator<T1>::iterator current1_;
+    const typename ParamGenerator<T2>::iterator begin2_;
+    const typename ParamGenerator<T2>::iterator end2_;
+    typename ParamGenerator<T2>::iterator current2_;
+    const typename ParamGenerator<T3>::iterator begin3_;
+    const typename ParamGenerator<T3>::iterator end3_;
+    typename ParamGenerator<T3>::iterator current3_;
+    const typename ParamGenerator<T4>::iterator begin4_;
+    const typename ParamGenerator<T4>::iterator end4_;
+    typename ParamGenerator<T4>::iterator current4_;
+    ParamType current_value_;
+  };  // class CartesianProductGenerator4::Iterator
+
+  // No implementation - assignment is unsupported.
+  void operator=(const CartesianProductGenerator4& other);
+
+  const ParamGenerator<T1> g1_;
+  const ParamGenerator<T2> g2_;
+  const ParamGenerator<T3> g3_;
+  const ParamGenerator<T4> g4_;
+};  // class CartesianProductGenerator4
+
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+class CartesianProductGenerator5
+    : public ParamGeneratorInterface< ::testing::tuple<T1, T2, T3, T4, T5> > {
+ public:
+  typedef ::testing::tuple<T1, T2, T3, T4, T5> ParamType;
+
+  CartesianProductGenerator5(const ParamGenerator<T1>& g1,
+      const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
+      const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5)
+      : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5) {}
+  virtual ~CartesianProductGenerator5() {}
+
+  virtual ParamIteratorInterface<ParamType>* Begin() const {
+    return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+        g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin());
+  }
+  virtual ParamIteratorInterface<ParamType>* End() const {
+    return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
+        g4_, g4_.end(), g5_, g5_.end());
+  }
+
+ private:
+  class Iterator : public ParamIteratorInterface<ParamType> {
+   public:
+    Iterator(const ParamGeneratorInterface<ParamType>* base,
+      const ParamGenerator<T1>& g1,
+      const typename ParamGenerator<T1>::iterator& current1,
+      const ParamGenerator<T2>& g2,
+      const typename ParamGenerator<T2>::iterator& current2,
+      const ParamGenerator<T3>& g3,
+      const typename ParamGenerator<T3>::iterator& current3,
+      const ParamGenerator<T4>& g4,
+      const typename ParamGenerator<T4>::iterator& current4,
+      const ParamGenerator<T5>& g5,
+      const typename ParamGenerator<T5>::iterator& current5)
+        : base_(base),
+          begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+          begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+          begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
+          begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
+          begin5_(g5.begin()), end5_(g5.end()), current5_(current5)    {
+      ComputeCurrentValue();
+    }
+    virtual ~Iterator() {}
+
+    virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+      return base_;
+    }
+    // Advance() must not be called on an iterator that is already past the
+    // end of its range, so no component iterator may be past the end of its
+    // range either.
+    virtual void Advance() {
+      assert(!AtEnd());
+      ++current5_;
+      if (current5_ == end5_) {
+        current5_ = begin5_;
+        ++current4_;
+      }
+      if (current4_ == end4_) {
+        current4_ = begin4_;
+        ++current3_;
+      }
+      if (current3_ == end3_) {
+        current3_ = begin3_;
+        ++current2_;
+      }
+      if (current2_ == end2_) {
+        current2_ = begin2_;
+        ++current1_;
+      }
+      ComputeCurrentValue();
+    }
+    virtual ParamIteratorInterface<ParamType>* Clone() const {
+      return new Iterator(*this);
+    }
+    virtual const ParamType* Current() const { return &current_value_; }
+    virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+      // Having the same base generator guarantees that the other
+      // iterator is of the same type and we can downcast.
+      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+          << "The program attempted to compare iterators "
+          << "from different generators." << std::endl;
+      const Iterator* typed_other =
+          CheckedDowncastToActualType<const Iterator>(&other);
+      // We must report iterators equal if they both point beyond their
+      // respective ranges. That can happen in a variety of fashions,
+      // so we have to consult AtEnd().
+      return (AtEnd() && typed_other->AtEnd()) ||
+         (
+          current1_ == typed_other->current1_ &&
+          current2_ == typed_other->current2_ &&
+          current3_ == typed_other->current3_ &&
+          current4_ == typed_other->current4_ &&
+          current5_ == typed_other->current5_);
+    }
+
+   private:
+    Iterator(const Iterator& other)
+        : base_(other.base_),
+        begin1_(other.begin1_),
+        end1_(other.end1_),
+        current1_(other.current1_),
+        begin2_(other.begin2_),
+        end2_(other.end2_),
+        current2_(other.current2_),
+        begin3_(other.begin3_),
+        end3_(other.end3_),
+        current3_(other.current3_),
+        begin4_(other.begin4_),
+        end4_(other.end4_),
+        current4_(other.current4_),
+        begin5_(other.begin5_),
+        end5_(other.end5_),
+        current5_(other.current5_) {
+      ComputeCurrentValue();
+    }
+
+    void ComputeCurrentValue() {
+      if (!AtEnd())
+        current_value_ = ParamType(*current1_, *current2_, *current3_,
+            *current4_, *current5_);
+    }
+    bool AtEnd() const {
+      // We must report the iterator as past the end of the range when any of
+      // the component iterators has reached the end of its range.
+      return
+          current1_ == end1_ ||
+          current2_ == end2_ ||
+          current3_ == end3_ ||
+          current4_ == end4_ ||
+          current5_ == end5_;
+    }
+
+    // No implementation - assignment is unsupported.
+    void operator=(const Iterator& other);
+
+    const ParamGeneratorInterface<ParamType>* const base_;
+    // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+    // current[i]_ is the actual traversing iterator.
+    const typename ParamGenerator<T1>::iterator begin1_;
+    const typename ParamGenerator<T1>::iterator end1_;
+    typename ParamGenerator<T1>::iterator current1_;
+    const typename ParamGenerator<T2>::iterator begin2_;
+    const typename ParamGenerator<T2>::iterator end2_;
+    typename ParamGenerator<T2>::iterator current2_;
+    const typename ParamGenerator<T3>::iterator begin3_;
+    const typename ParamGenerator<T3>::iterator end3_;
+    typename ParamGenerator<T3>::iterator current3_;
+    const typename ParamGenerator<T4>::iterator begin4_;
+    const typename ParamGenerator<T4>::iterator end4_;
+    typename ParamGenerator<T4>::iterator current4_;
+    const typename ParamGenerator<T5>::iterator begin5_;
+    const typename ParamGenerator<T5>::iterator end5_;
+    typename ParamGenerator<T5>::iterator current5_;
+    ParamType current_value_;
+  };  // class CartesianProductGenerator5::Iterator
+
+  // No implementation - assignment is unsupported.
+  void operator=(const CartesianProductGenerator5& other);
+
+  const ParamGenerator<T1> g1_;
+  const ParamGenerator<T2> g2_;
+  const ParamGenerator<T3> g3_;
+  const ParamGenerator<T4> g4_;
+  const ParamGenerator<T5> g5_;
+};  // class CartesianProductGenerator5
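+
+// A brief illustration of the iteration order (the 2x2 example here is
+// hypothetical, not generated code): each Iterator in these
+// CartesianProductGeneratorN classes behaves like an odometer.  Advance()
+// increments the last component iterator first and, whenever a component
+// wraps back to its begin, carries into the component to its left.  For the
+// product of {1, 2} and {'a', 'b'} the visited tuples are
+//   (1, 'a'), (1, 'b'), (2, 'a'), (2, 'b'),
+// after which the first component reaches its end and AtEnd() becomes true.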
+
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6>
+class CartesianProductGenerator6
+    : public ParamGeneratorInterface< ::testing::tuple<T1, T2, T3, T4, T5,
+        T6> > {
+ public:
+  typedef ::testing::tuple<T1, T2, T3, T4, T5, T6> ParamType;
+
+  CartesianProductGenerator6(const ParamGenerator<T1>& g1,
+      const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
+      const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,
+      const ParamGenerator<T6>& g6)
+      : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6) {}
+  virtual ~CartesianProductGenerator6() {}
+
+  virtual ParamIteratorInterface<ParamType>* Begin() const {
+    return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+        g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin());
+  }
+  virtual ParamIteratorInterface<ParamType>* End() const {
+    return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
+        g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end());
+  }
+
+ private:
+  class Iterator : public ParamIteratorInterface<ParamType> {
+   public:
+    Iterator(const ParamGeneratorInterface<ParamType>* base,
+      const ParamGenerator<T1>& g1,
+      const typename ParamGenerator<T1>::iterator& current1,
+      const ParamGenerator<T2>& g2,
+      const typename ParamGenerator<T2>::iterator& current2,
+      const ParamGenerator<T3>& g3,
+      const typename ParamGenerator<T3>::iterator& current3,
+      const ParamGenerator<T4>& g4,
+      const typename ParamGenerator<T4>::iterator& current4,
+      const ParamGenerator<T5>& g5,
+      const typename ParamGenerator<T5>::iterator& current5,
+      const ParamGenerator<T6>& g6,
+      const typename ParamGenerator<T6>::iterator& current6)
+        : base_(base),
+          begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+          begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+          begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
+          begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
+          begin5_(g5.begin()), end5_(g5.end()), current5_(current5),
+          begin6_(g6.begin()), end6_(g6.end()), current6_(current6)    {
+      ComputeCurrentValue();
+    }
+    virtual ~Iterator() {}
+
+    virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+      return base_;
+    }
+    // Advance() must not be called on an end-of-range iterator, so none of
+    // the component iterators may be past the end of its range either.
+    virtual void Advance() {
+      assert(!AtEnd());
+      ++current6_;
+      if (current6_ == end6_) {
+        current6_ = begin6_;
+        ++current5_;
+      }
+      if (current5_ == end5_) {
+        current5_ = begin5_;
+        ++current4_;
+      }
+      if (current4_ == end4_) {
+        current4_ = begin4_;
+        ++current3_;
+      }
+      if (current3_ == end3_) {
+        current3_ = begin3_;
+        ++current2_;
+      }
+      if (current2_ == end2_) {
+        current2_ = begin2_;
+        ++current1_;
+      }
+      ComputeCurrentValue();
+    }
+    virtual ParamIteratorInterface<ParamType>* Clone() const {
+      return new Iterator(*this);
+    }
+    virtual const ParamType* Current() const { return &current_value_; }
+    virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+      // Having the same base generator guarantees that the other
+      // iterator is of the same type and we can downcast.
+      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+          << "The program attempted to compare iterators "
+          << "from different generators." << std::endl;
+      const Iterator* typed_other =
+          CheckedDowncastToActualType<const Iterator>(&other);
+      // We must report iterators equal if they both point beyond their
+      // respective ranges. That can happen in a variety of fashions,
+      // so we have to consult AtEnd().
+      return (AtEnd() && typed_other->AtEnd()) ||
+         (
+          current1_ == typed_other->current1_ &&
+          current2_ == typed_other->current2_ &&
+          current3_ == typed_other->current3_ &&
+          current4_ == typed_other->current4_ &&
+          current5_ == typed_other->current5_ &&
+          current6_ == typed_other->current6_);
+    }
+
+   private:
+    Iterator(const Iterator& other)
+        : base_(other.base_),
+        begin1_(other.begin1_),
+        end1_(other.end1_),
+        current1_(other.current1_),
+        begin2_(other.begin2_),
+        end2_(other.end2_),
+        current2_(other.current2_),
+        begin3_(other.begin3_),
+        end3_(other.end3_),
+        current3_(other.current3_),
+        begin4_(other.begin4_),
+        end4_(other.end4_),
+        current4_(other.current4_),
+        begin5_(other.begin5_),
+        end5_(other.end5_),
+        current5_(other.current5_),
+        begin6_(other.begin6_),
+        end6_(other.end6_),
+        current6_(other.current6_) {
+      ComputeCurrentValue();
+    }
+
+    void ComputeCurrentValue() {
+      if (!AtEnd())
+        current_value_ = ParamType(*current1_, *current2_, *current3_,
+            *current4_, *current5_, *current6_);
+    }
+    bool AtEnd() const {
+      // We must report the iterator as past the end of the range as soon as
+      // any of the component iterators has reached the end of its range.
+      return
+          current1_ == end1_ ||
+          current2_ == end2_ ||
+          current3_ == end3_ ||
+          current4_ == end4_ ||
+          current5_ == end5_ ||
+          current6_ == end6_;
+    }
+
+    // No implementation - assignment is unsupported.
+    void operator=(const Iterator& other);
+
+    const ParamGeneratorInterface<ParamType>* const base_;
+    // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+    // current[i]_ is the actual traversing iterator.
+    const typename ParamGenerator<T1>::iterator begin1_;
+    const typename ParamGenerator<T1>::iterator end1_;
+    typename ParamGenerator<T1>::iterator current1_;
+    const typename ParamGenerator<T2>::iterator begin2_;
+    const typename ParamGenerator<T2>::iterator end2_;
+    typename ParamGenerator<T2>::iterator current2_;
+    const typename ParamGenerator<T3>::iterator begin3_;
+    const typename ParamGenerator<T3>::iterator end3_;
+    typename ParamGenerator<T3>::iterator current3_;
+    const typename ParamGenerator<T4>::iterator begin4_;
+    const typename ParamGenerator<T4>::iterator end4_;
+    typename ParamGenerator<T4>::iterator current4_;
+    const typename ParamGenerator<T5>::iterator begin5_;
+    const typename ParamGenerator<T5>::iterator end5_;
+    typename ParamGenerator<T5>::iterator current5_;
+    const typename ParamGenerator<T6>::iterator begin6_;
+    const typename ParamGenerator<T6>::iterator end6_;
+    typename ParamGenerator<T6>::iterator current6_;
+    ParamType current_value_;
+  };  // class CartesianProductGenerator6::Iterator
+
+  // No implementation - assignment is unsupported.
+  void operator=(const CartesianProductGenerator6& other);
+
+  const ParamGenerator<T1> g1_;
+  const ParamGenerator<T2> g2_;
+  const ParamGenerator<T3> g3_;
+  const ParamGenerator<T4> g4_;
+  const ParamGenerator<T5> g5_;
+  const ParamGenerator<T6> g6_;
+};  // class CartesianProductGenerator6
+
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7>
+class CartesianProductGenerator7
+    : public ParamGeneratorInterface< ::testing::tuple<T1, T2, T3, T4, T5, T6,
+        T7> > {
+ public:
+  typedef ::testing::tuple<T1, T2, T3, T4, T5, T6, T7> ParamType;
+
+  CartesianProductGenerator7(const ParamGenerator<T1>& g1,
+      const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
+      const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,
+      const ParamGenerator<T6>& g6, const ParamGenerator<T7>& g7)
+      : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7) {}
+  virtual ~CartesianProductGenerator7() {}
+
+  virtual ParamIteratorInterface<ParamType>* Begin() const {
+    return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+        g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_,
+        g7_.begin());
+  }
+  virtual ParamIteratorInterface<ParamType>* End() const {
+    return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
+        g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end());
+  }
+
+ private:
+  class Iterator : public ParamIteratorInterface<ParamType> {
+   public:
+    Iterator(const ParamGeneratorInterface<ParamType>* base,
+      const ParamGenerator<T1>& g1,
+      const typename ParamGenerator<T1>::iterator& current1,
+      const ParamGenerator<T2>& g2,
+      const typename ParamGenerator<T2>::iterator& current2,
+      const ParamGenerator<T3>& g3,
+      const typename ParamGenerator<T3>::iterator& current3,
+      const ParamGenerator<T4>& g4,
+      const typename ParamGenerator<T4>::iterator& current4,
+      const ParamGenerator<T5>& g5,
+      const typename ParamGenerator<T5>::iterator& current5,
+      const ParamGenerator<T6>& g6,
+      const typename ParamGenerator<T6>::iterator& current6,
+      const ParamGenerator<T7>& g7,
+      const typename ParamGenerator<T7>::iterator& current7)
+        : base_(base),
+          begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+          begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+          begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
+          begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
+          begin5_(g5.begin()), end5_(g5.end()), current5_(current5),
+          begin6_(g6.begin()), end6_(g6.end()), current6_(current6),
+          begin7_(g7.begin()), end7_(g7.end()), current7_(current7)    {
+      ComputeCurrentValue();
+    }
+    virtual ~Iterator() {}
+
+    virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+      return base_;
+    }
+    // Advance() must not be called on an end-of-range iterator, so none of
+    // the component iterators may be past the end of its range either.
+    virtual void Advance() {
+      assert(!AtEnd());
+      ++current7_;
+      if (current7_ == end7_) {
+        current7_ = begin7_;
+        ++current6_;
+      }
+      if (current6_ == end6_) {
+        current6_ = begin6_;
+        ++current5_;
+      }
+      if (current5_ == end5_) {
+        current5_ = begin5_;
+        ++current4_;
+      }
+      if (current4_ == end4_) {
+        current4_ = begin4_;
+        ++current3_;
+      }
+      if (current3_ == end3_) {
+        current3_ = begin3_;
+        ++current2_;
+      }
+      if (current2_ == end2_) {
+        current2_ = begin2_;
+        ++current1_;
+      }
+      ComputeCurrentValue();
+    }
+    virtual ParamIteratorInterface<ParamType>* Clone() const {
+      return new Iterator(*this);
+    }
+    virtual const ParamType* Current() const { return &current_value_; }
+    virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+      // Having the same base generator guarantees that the other
+      // iterator is of the same type and we can downcast.
+      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+          << "The program attempted to compare iterators "
+          << "from different generators." << std::endl;
+      const Iterator* typed_other =
+          CheckedDowncastToActualType<const Iterator>(&other);
+      // We must report iterators equal if they both point beyond their
+      // respective ranges. That can happen in a variety of fashions,
+      // so we have to consult AtEnd().
+      return (AtEnd() && typed_other->AtEnd()) ||
+         (
+          current1_ == typed_other->current1_ &&
+          current2_ == typed_other->current2_ &&
+          current3_ == typed_other->current3_ &&
+          current4_ == typed_other->current4_ &&
+          current5_ == typed_other->current5_ &&
+          current6_ == typed_other->current6_ &&
+          current7_ == typed_other->current7_);
+    }
+
+   private:
+    Iterator(const Iterator& other)
+        : base_(other.base_),
+        begin1_(other.begin1_),
+        end1_(other.end1_),
+        current1_(other.current1_),
+        begin2_(other.begin2_),
+        end2_(other.end2_),
+        current2_(other.current2_),
+        begin3_(other.begin3_),
+        end3_(other.end3_),
+        current3_(other.current3_),
+        begin4_(other.begin4_),
+        end4_(other.end4_),
+        current4_(other.current4_),
+        begin5_(other.begin5_),
+        end5_(other.end5_),
+        current5_(other.current5_),
+        begin6_(other.begin6_),
+        end6_(other.end6_),
+        current6_(other.current6_),
+        begin7_(other.begin7_),
+        end7_(other.end7_),
+        current7_(other.current7_) {
+      ComputeCurrentValue();
+    }
+
+    void ComputeCurrentValue() {
+      if (!AtEnd())
+        current_value_ = ParamType(*current1_, *current2_, *current3_,
+            *current4_, *current5_, *current6_, *current7_);
+    }
+    bool AtEnd() const {
+      // We must report the iterator as past the end of the range as soon as
+      // any of the component iterators has reached the end of its range.
+      return
+          current1_ == end1_ ||
+          current2_ == end2_ ||
+          current3_ == end3_ ||
+          current4_ == end4_ ||
+          current5_ == end5_ ||
+          current6_ == end6_ ||
+          current7_ == end7_;
+    }
+
+    // No implementation - assignment is unsupported.
+    void operator=(const Iterator& other);
+
+    const ParamGeneratorInterface<ParamType>* const base_;
+    // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+    // current[i]_ is the actual traversing iterator.
+    const typename ParamGenerator<T1>::iterator begin1_;
+    const typename ParamGenerator<T1>::iterator end1_;
+    typename ParamGenerator<T1>::iterator current1_;
+    const typename ParamGenerator<T2>::iterator begin2_;
+    const typename ParamGenerator<T2>::iterator end2_;
+    typename ParamGenerator<T2>::iterator current2_;
+    const typename ParamGenerator<T3>::iterator begin3_;
+    const typename ParamGenerator<T3>::iterator end3_;
+    typename ParamGenerator<T3>::iterator current3_;
+    const typename ParamGenerator<T4>::iterator begin4_;
+    const typename ParamGenerator<T4>::iterator end4_;
+    typename ParamGenerator<T4>::iterator current4_;
+    const typename ParamGenerator<T5>::iterator begin5_;
+    const typename ParamGenerator<T5>::iterator end5_;
+    typename ParamGenerator<T5>::iterator current5_;
+    const typename ParamGenerator<T6>::iterator begin6_;
+    const typename ParamGenerator<T6>::iterator end6_;
+    typename ParamGenerator<T6>::iterator current6_;
+    const typename ParamGenerator<T7>::iterator begin7_;
+    const typename ParamGenerator<T7>::iterator end7_;
+    typename ParamGenerator<T7>::iterator current7_;
+    ParamType current_value_;
+  };  // class CartesianProductGenerator7::Iterator
+
+  // No implementation - assignment is unsupported.
+  void operator=(const CartesianProductGenerator7& other);
+
+  const ParamGenerator<T1> g1_;
+  const ParamGenerator<T2> g2_;
+  const ParamGenerator<T3> g3_;
+  const ParamGenerator<T4> g4_;
+  const ParamGenerator<T5> g5_;
+  const ParamGenerator<T6> g6_;
+  const ParamGenerator<T7> g7_;
+};  // class CartesianProductGenerator7
+
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8>
+class CartesianProductGenerator8
+    : public ParamGeneratorInterface< ::testing::tuple<T1, T2, T3, T4, T5, T6,
+        T7, T8> > {
+ public:
+  typedef ::testing::tuple<T1, T2, T3, T4, T5, T6, T7, T8> ParamType;
+
+  CartesianProductGenerator8(const ParamGenerator<T1>& g1,
+      const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
+      const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,
+      const ParamGenerator<T6>& g6, const ParamGenerator<T7>& g7,
+      const ParamGenerator<T8>& g8)
+      : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7),
+          g8_(g8) {}
+  virtual ~CartesianProductGenerator8() {}
+
+  virtual ParamIteratorInterface<ParamType>* Begin() const {
+    return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+        g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_,
+        g7_.begin(), g8_, g8_.begin());
+  }
+  virtual ParamIteratorInterface<ParamType>* End() const {
+    return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
+        g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_,
+        g8_.end());
+  }
+
+ private:
+  class Iterator : public ParamIteratorInterface<ParamType> {
+   public:
+    Iterator(const ParamGeneratorInterface<ParamType>* base,
+      const ParamGenerator<T1>& g1,
+      const typename ParamGenerator<T1>::iterator& current1,
+      const ParamGenerator<T2>& g2,
+      const typename ParamGenerator<T2>::iterator& current2,
+      const ParamGenerator<T3>& g3,
+      const typename ParamGenerator<T3>::iterator& current3,
+      const ParamGenerator<T4>& g4,
+      const typename ParamGenerator<T4>::iterator& current4,
+      const ParamGenerator<T5>& g5,
+      const typename ParamGenerator<T5>::iterator& current5,
+      const ParamGenerator<T6>& g6,
+      const typename ParamGenerator<T6>::iterator& current6,
+      const ParamGenerator<T7>& g7,
+      const typename ParamGenerator<T7>::iterator& current7,
+      const ParamGenerator<T8>& g8,
+      const typename ParamGenerator<T8>::iterator& current8)
+        : base_(base),
+          begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+          begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+          begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
+          begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
+          begin5_(g5.begin()), end5_(g5.end()), current5_(current5),
+          begin6_(g6.begin()), end6_(g6.end()), current6_(current6),
+          begin7_(g7.begin()), end7_(g7.end()), current7_(current7),
+          begin8_(g8.begin()), end8_(g8.end()), current8_(current8)    {
+      ComputeCurrentValue();
+    }
+    virtual ~Iterator() {}
+
+    virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+      return base_;
+    }
+    // Advance() must not be called on an end-of-range iterator, so none of
+    // the component iterators may be past the end of its range either.
+    virtual void Advance() {
+      assert(!AtEnd());
+      ++current8_;
+      if (current8_ == end8_) {
+        current8_ = begin8_;
+        ++current7_;
+      }
+      if (current7_ == end7_) {
+        current7_ = begin7_;
+        ++current6_;
+      }
+      if (current6_ == end6_) {
+        current6_ = begin6_;
+        ++current5_;
+      }
+      if (current5_ == end5_) {
+        current5_ = begin5_;
+        ++current4_;
+      }
+      if (current4_ == end4_) {
+        current4_ = begin4_;
+        ++current3_;
+      }
+      if (current3_ == end3_) {
+        current3_ = begin3_;
+        ++current2_;
+      }
+      if (current2_ == end2_) {
+        current2_ = begin2_;
+        ++current1_;
+      }
+      ComputeCurrentValue();
+    }
+    virtual ParamIteratorInterface<ParamType>* Clone() const {
+      return new Iterator(*this);
+    }
+    virtual const ParamType* Current() const { return &current_value_; }
+    virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+      // Having the same base generator guarantees that the other
+      // iterator is of the same type and we can downcast.
+      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+          << "The program attempted to compare iterators "
+          << "from different generators." << std::endl;
+      const Iterator* typed_other =
+          CheckedDowncastToActualType<const Iterator>(&other);
+      // We must report iterators equal if they both point beyond their
+      // respective ranges. That can happen in a variety of fashions,
+      // so we have to consult AtEnd().
+      return (AtEnd() && typed_other->AtEnd()) ||
+         (
+          current1_ == typed_other->current1_ &&
+          current2_ == typed_other->current2_ &&
+          current3_ == typed_other->current3_ &&
+          current4_ == typed_other->current4_ &&
+          current5_ == typed_other->current5_ &&
+          current6_ == typed_other->current6_ &&
+          current7_ == typed_other->current7_ &&
+          current8_ == typed_other->current8_);
+    }
+
+   private:
+    Iterator(const Iterator& other)
+        : base_(other.base_),
+        begin1_(other.begin1_),
+        end1_(other.end1_),
+        current1_(other.current1_),
+        begin2_(other.begin2_),
+        end2_(other.end2_),
+        current2_(other.current2_),
+        begin3_(other.begin3_),
+        end3_(other.end3_),
+        current3_(other.current3_),
+        begin4_(other.begin4_),
+        end4_(other.end4_),
+        current4_(other.current4_),
+        begin5_(other.begin5_),
+        end5_(other.end5_),
+        current5_(other.current5_),
+        begin6_(other.begin6_),
+        end6_(other.end6_),
+        current6_(other.current6_),
+        begin7_(other.begin7_),
+        end7_(other.end7_),
+        current7_(other.current7_),
+        begin8_(other.begin8_),
+        end8_(other.end8_),
+        current8_(other.current8_) {
+      ComputeCurrentValue();
+    }
+
+    void ComputeCurrentValue() {
+      if (!AtEnd())
+        current_value_ = ParamType(*current1_, *current2_, *current3_,
+            *current4_, *current5_, *current6_, *current7_, *current8_);
+    }
+    bool AtEnd() const {
+      // We must report the iterator as past the end of the range as soon as
+      // any of the component iterators has reached the end of its range.
+      return
+          current1_ == end1_ ||
+          current2_ == end2_ ||
+          current3_ == end3_ ||
+          current4_ == end4_ ||
+          current5_ == end5_ ||
+          current6_ == end6_ ||
+          current7_ == end7_ ||
+          current8_ == end8_;
+    }
+
+    // No implementation - assignment is unsupported.
+    void operator=(const Iterator& other);
+
+    const ParamGeneratorInterface<ParamType>* const base_;
+    // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+    // current[i]_ is the actual traversing iterator.
+    const typename ParamGenerator<T1>::iterator begin1_;
+    const typename ParamGenerator<T1>::iterator end1_;
+    typename ParamGenerator<T1>::iterator current1_;
+    const typename ParamGenerator<T2>::iterator begin2_;
+    const typename ParamGenerator<T2>::iterator end2_;
+    typename ParamGenerator<T2>::iterator current2_;
+    const typename ParamGenerator<T3>::iterator begin3_;
+    const typename ParamGenerator<T3>::iterator end3_;
+    typename ParamGenerator<T3>::iterator current3_;
+    const typename ParamGenerator<T4>::iterator begin4_;
+    const typename ParamGenerator<T4>::iterator end4_;
+    typename ParamGenerator<T4>::iterator current4_;
+    const typename ParamGenerator<T5>::iterator begin5_;
+    const typename ParamGenerator<T5>::iterator end5_;
+    typename ParamGenerator<T5>::iterator current5_;
+    const typename ParamGenerator<T6>::iterator begin6_;
+    const typename ParamGenerator<T6>::iterator end6_;
+    typename ParamGenerator<T6>::iterator current6_;
+    const typename ParamGenerator<T7>::iterator begin7_;
+    const typename ParamGenerator<T7>::iterator end7_;
+    typename ParamGenerator<T7>::iterator current7_;
+    const typename ParamGenerator<T8>::iterator begin8_;
+    const typename ParamGenerator<T8>::iterator end8_;
+    typename ParamGenerator<T8>::iterator current8_;
+    ParamType current_value_;
+  };  // class CartesianProductGenerator8::Iterator
+
+  // No implementation - assignment is unsupported.
+  void operator=(const CartesianProductGenerator8& other);
+
+  const ParamGenerator<T1> g1_;
+  const ParamGenerator<T2> g2_;
+  const ParamGenerator<T3> g3_;
+  const ParamGenerator<T4> g4_;
+  const ParamGenerator<T5> g5_;
+  const ParamGenerator<T6> g6_;
+  const ParamGenerator<T7> g7_;
+  const ParamGenerator<T8> g8_;
+};  // class CartesianProductGenerator8
+
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9>
+class CartesianProductGenerator9
+    : public ParamGeneratorInterface< ::testing::tuple<T1, T2, T3, T4, T5, T6,
+        T7, T8, T9> > {
+ public:
+  typedef ::testing::tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9> ParamType;
+
+  CartesianProductGenerator9(const ParamGenerator<T1>& g1,
+      const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
+      const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,
+      const ParamGenerator<T6>& g6, const ParamGenerator<T7>& g7,
+      const ParamGenerator<T8>& g8, const ParamGenerator<T9>& g9)
+      : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8),
+          g9_(g9) {}
+  virtual ~CartesianProductGenerator9() {}
+
+  virtual ParamIteratorInterface<ParamType>* Begin() const {
+    return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+        g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_,
+        g7_.begin(), g8_, g8_.begin(), g9_, g9_.begin());
+  }
+  virtual ParamIteratorInterface<ParamType>* End() const {
+    return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
+        g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_,
+        g8_.end(), g9_, g9_.end());
+  }
+
+ private:
+  class Iterator : public ParamIteratorInterface<ParamType> {
+   public:
+    Iterator(const ParamGeneratorInterface<ParamType>* base,
+      const ParamGenerator<T1>& g1,
+      const typename ParamGenerator<T1>::iterator& current1,
+      const ParamGenerator<T2>& g2,
+      const typename ParamGenerator<T2>::iterator& current2,
+      const ParamGenerator<T3>& g3,
+      const typename ParamGenerator<T3>::iterator& current3,
+      const ParamGenerator<T4>& g4,
+      const typename ParamGenerator<T4>::iterator& current4,
+      const ParamGenerator<T5>& g5,
+      const typename ParamGenerator<T5>::iterator& current5,
+      const ParamGenerator<T6>& g6,
+      const typename ParamGenerator<T6>::iterator& current6,
+      const ParamGenerator<T7>& g7,
+      const typename ParamGenerator<T7>::iterator& current7,
+      const ParamGenerator<T8>& g8,
+      const typename ParamGenerator<T8>::iterator& current8,
+      const ParamGenerator<T9>& g9,
+      const typename ParamGenerator<T9>::iterator& current9)
+        : base_(base),
+          begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+          begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+          begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
+          begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
+          begin5_(g5.begin()), end5_(g5.end()), current5_(current5),
+          begin6_(g6.begin()), end6_(g6.end()), current6_(current6),
+          begin7_(g7.begin()), end7_(g7.end()), current7_(current7),
+          begin8_(g8.begin()), end8_(g8.end()), current8_(current8),
+          begin9_(g9.begin()), end9_(g9.end()), current9_(current9)    {
+      ComputeCurrentValue();
+    }
+    virtual ~Iterator() {}
+
+    virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+      return base_;
+    }
+    // Advance() must not be called on an end-of-range iterator, so none of
+    // the component iterators may be past the end of its range either.
+    virtual void Advance() {
+      assert(!AtEnd());
+      ++current9_;
+      if (current9_ == end9_) {
+        current9_ = begin9_;
+        ++current8_;
+      }
+      if (current8_ == end8_) {
+        current8_ = begin8_;
+        ++current7_;
+      }
+      if (current7_ == end7_) {
+        current7_ = begin7_;
+        ++current6_;
+      }
+      if (current6_ == end6_) {
+        current6_ = begin6_;
+        ++current5_;
+      }
+      if (current5_ == end5_) {
+        current5_ = begin5_;
+        ++current4_;
+      }
+      if (current4_ == end4_) {
+        current4_ = begin4_;
+        ++current3_;
+      }
+      if (current3_ == end3_) {
+        current3_ = begin3_;
+        ++current2_;
+      }
+      if (current2_ == end2_) {
+        current2_ = begin2_;
+        ++current1_;
+      }
+      ComputeCurrentValue();
+    }
+    virtual ParamIteratorInterface<ParamType>* Clone() const {
+      return new Iterator(*this);
+    }
+    virtual const ParamType* Current() const { return &current_value_; }
+    virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+      // Having the same base generator guarantees that the other
+      // iterator is of the same type and we can downcast.
+      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+          << "The program attempted to compare iterators "
+          << "from different generators." << std::endl;
+      const Iterator* typed_other =
+          CheckedDowncastToActualType<const Iterator>(&other);
+      // We must report iterators equal if they both point beyond their
+      // respective ranges. That can happen in a variety of fashions,
+      // so we have to consult AtEnd().
+      return (AtEnd() && typed_other->AtEnd()) ||
+         (
+          current1_ == typed_other->current1_ &&
+          current2_ == typed_other->current2_ &&
+          current3_ == typed_other->current3_ &&
+          current4_ == typed_other->current4_ &&
+          current5_ == typed_other->current5_ &&
+          current6_ == typed_other->current6_ &&
+          current7_ == typed_other->current7_ &&
+          current8_ == typed_other->current8_ &&
+          current9_ == typed_other->current9_);
+    }
+
+   private:
+    Iterator(const Iterator& other)
+        : base_(other.base_),
+        begin1_(other.begin1_),
+        end1_(other.end1_),
+        current1_(other.current1_),
+        begin2_(other.begin2_),
+        end2_(other.end2_),
+        current2_(other.current2_),
+        begin3_(other.begin3_),
+        end3_(other.end3_),
+        current3_(other.current3_),
+        begin4_(other.begin4_),
+        end4_(other.end4_),
+        current4_(other.current4_),
+        begin5_(other.begin5_),
+        end5_(other.end5_),
+        current5_(other.current5_),
+        begin6_(other.begin6_),
+        end6_(other.end6_),
+        current6_(other.current6_),
+        begin7_(other.begin7_),
+        end7_(other.end7_),
+        current7_(other.current7_),
+        begin8_(other.begin8_),
+        end8_(other.end8_),
+        current8_(other.current8_),
+        begin9_(other.begin9_),
+        end9_(other.end9_),
+        current9_(other.current9_) {
+      ComputeCurrentValue();
+    }
+
+    void ComputeCurrentValue() {
+      if (!AtEnd())
+        current_value_ = ParamType(*current1_, *current2_, *current3_,
+            *current4_, *current5_, *current6_, *current7_, *current8_,
+            *current9_);
+    }
+    bool AtEnd() const {
+      // We must report the iterator as past the end of the range as soon as
+      // any of the component iterators has reached the end of its range.
+      return
+          current1_ == end1_ ||
+          current2_ == end2_ ||
+          current3_ == end3_ ||
+          current4_ == end4_ ||
+          current5_ == end5_ ||
+          current6_ == end6_ ||
+          current7_ == end7_ ||
+          current8_ == end8_ ||
+          current9_ == end9_;
+    }
+
+    // No implementation - assignment is unsupported.
+    void operator=(const Iterator& other);
+
+    const ParamGeneratorInterface<ParamType>* const base_;
+    // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+    // current[i]_ is the actual traversing iterator.
+    const typename ParamGenerator<T1>::iterator begin1_;
+    const typename ParamGenerator<T1>::iterator end1_;
+    typename ParamGenerator<T1>::iterator current1_;
+    const typename ParamGenerator<T2>::iterator begin2_;
+    const typename ParamGenerator<T2>::iterator end2_;
+    typename ParamGenerator<T2>::iterator current2_;
+    const typename ParamGenerator<T3>::iterator begin3_;
+    const typename ParamGenerator<T3>::iterator end3_;
+    typename ParamGenerator<T3>::iterator current3_;
+    const typename ParamGenerator<T4>::iterator begin4_;
+    const typename ParamGenerator<T4>::iterator end4_;
+    typename ParamGenerator<T4>::iterator current4_;
+    const typename ParamGenerator<T5>::iterator begin5_;
+    const typename ParamGenerator<T5>::iterator end5_;
+    typename ParamGenerator<T5>::iterator current5_;
+    const typename ParamGenerator<T6>::iterator begin6_;
+    const typename ParamGenerator<T6>::iterator end6_;
+    typename ParamGenerator<T6>::iterator current6_;
+    const typename ParamGenerator<T7>::iterator begin7_;
+    const typename ParamGenerator<T7>::iterator end7_;
+    typename ParamGenerator<T7>::iterator current7_;
+    const typename ParamGenerator<T8>::iterator begin8_;
+    const typename ParamGenerator<T8>::iterator end8_;
+    typename ParamGenerator<T8>::iterator current8_;
+    const typename ParamGenerator<T9>::iterator begin9_;
+    const typename ParamGenerator<T9>::iterator end9_;
+    typename ParamGenerator<T9>::iterator current9_;
+    ParamType current_value_;
+  };  // class CartesianProductGenerator9::Iterator
+
+  // No implementation - assignment is unsupported.
+  void operator=(const CartesianProductGenerator9& other);
+
+  const ParamGenerator<T1> g1_;
+  const ParamGenerator<T2> g2_;
+  const ParamGenerator<T3> g3_;
+  const ParamGenerator<T4> g4_;
+  const ParamGenerator<T5> g5_;
+  const ParamGenerator<T6> g6_;
+  const ParamGenerator<T7> g7_;
+  const ParamGenerator<T8> g8_;
+  const ParamGenerator<T9> g9_;
+};  // class CartesianProductGenerator9
+
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10>
+class CartesianProductGenerator10
+    : public ParamGeneratorInterface< ::testing::tuple<T1, T2, T3, T4, T5, T6,
+        T7, T8, T9, T10> > {
+ public:
+  typedef ::testing::tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> ParamType;
+
+  CartesianProductGenerator10(const ParamGenerator<T1>& g1,
+      const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
+      const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,
+      const ParamGenerator<T6>& g6, const ParamGenerator<T7>& g7,
+      const ParamGenerator<T8>& g8, const ParamGenerator<T9>& g9,
+      const ParamGenerator<T10>& g10)
+      : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8),
+          g9_(g9), g10_(g10) {}
+  virtual ~CartesianProductGenerator10() {}
+
+  virtual ParamIteratorInterface<ParamType>* Begin() const {
+    return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+        g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_,
+        g7_.begin(), g8_, g8_.begin(), g9_, g9_.begin(), g10_, g10_.begin());
+  }
+  virtual ParamIteratorInterface<ParamType>* End() const {
+    return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
+        g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_,
+        g8_.end(), g9_, g9_.end(), g10_, g10_.end());
+  }
+
+ private:
+  class Iterator : public ParamIteratorInterface<ParamType> {
+   public:
+    Iterator(const ParamGeneratorInterface<ParamType>* base,
+      const ParamGenerator<T1>& g1,
+      const typename ParamGenerator<T1>::iterator& current1,
+      const ParamGenerator<T2>& g2,
+      const typename ParamGenerator<T2>::iterator& current2,
+      const ParamGenerator<T3>& g3,
+      const typename ParamGenerator<T3>::iterator& current3,
+      const ParamGenerator<T4>& g4,
+      const typename ParamGenerator<T4>::iterator& current4,
+      const ParamGenerator<T5>& g5,
+      const typename ParamGenerator<T5>::iterator& current5,
+      const ParamGenerator<T6>& g6,
+      const typename ParamGenerator<T6>::iterator& current6,
+      const ParamGenerator<T7>& g7,
+      const typename ParamGenerator<T7>::iterator& current7,
+      const ParamGenerator<T8>& g8,
+      const typename ParamGenerator<T8>::iterator& current8,
+      const ParamGenerator<T9>& g9,
+      const typename ParamGenerator<T9>::iterator& current9,
+      const ParamGenerator<T10>& g10,
+      const typename ParamGenerator<T10>::iterator& current10)
+        : base_(base),
+          begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+          begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+          begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
+          begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
+          begin5_(g5.begin()), end5_(g5.end()), current5_(current5),
+          begin6_(g6.begin()), end6_(g6.end()), current6_(current6),
+          begin7_(g7.begin()), end7_(g7.end()), current7_(current7),
+          begin8_(g8.begin()), end8_(g8.end()), current8_(current8),
+          begin9_(g9.begin()), end9_(g9.end()), current9_(current9),
+          begin10_(g10.begin()), end10_(g10.end()), current10_(current10)    {
+      ComputeCurrentValue();
+    }
+    virtual ~Iterator() {}
+
+    virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+      return base_;
+    }
+    // Advance() must not be called on an end-of-range iterator, so none of
+    // the component iterators may be past the end of its range either.
+    virtual void Advance() {
+      assert(!AtEnd());
+      ++current10_;
+      if (current10_ == end10_) {
+        current10_ = begin10_;
+        ++current9_;
+      }
+      if (current9_ == end9_) {
+        current9_ = begin9_;
+        ++current8_;
+      }
+      if (current8_ == end8_) {
+        current8_ = begin8_;
+        ++current7_;
+      }
+      if (current7_ == end7_) {
+        current7_ = begin7_;
+        ++current6_;
+      }
+      if (current6_ == end6_) {
+        current6_ = begin6_;
+        ++current5_;
+      }
+      if (current5_ == end5_) {
+        current5_ = begin5_;
+        ++current4_;
+      }
+      if (current4_ == end4_) {
+        current4_ = begin4_;
+        ++current3_;
+      }
+      if (current3_ == end3_) {
+        current3_ = begin3_;
+        ++current2_;
+      }
+      if (current2_ == end2_) {
+        current2_ = begin2_;
+        ++current1_;
+      }
+      ComputeCurrentValue();
+    }
+    virtual ParamIteratorInterface<ParamType>* Clone() const {
+      return new Iterator(*this);
+    }
+    virtual const ParamType* Current() const { return &current_value_; }
+    virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+      // Having the same base generator guarantees that the other
+      // iterator is of the same type and we can downcast.
+      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+          << "The program attempted to compare iterators "
+          << "from different generators." << std::endl;
+      const Iterator* typed_other =
+          CheckedDowncastToActualType<const Iterator>(&other);
+      // We must report iterators equal if they both point beyond their
+      // respective ranges. That can happen in a variety of fashions,
+      // so we have to consult AtEnd().
+      return (AtEnd() && typed_other->AtEnd()) ||
+         (
+          current1_ == typed_other->current1_ &&
+          current2_ == typed_other->current2_ &&
+          current3_ == typed_other->current3_ &&
+          current4_ == typed_other->current4_ &&
+          current5_ == typed_other->current5_ &&
+          current6_ == typed_other->current6_ &&
+          current7_ == typed_other->current7_ &&
+          current8_ == typed_other->current8_ &&
+          current9_ == typed_other->current9_ &&
+          current10_ == typed_other->current10_);
+    }
+
+   private:
+    Iterator(const Iterator& other)
+        : base_(other.base_),
+        begin1_(other.begin1_),
+        end1_(other.end1_),
+        current1_(other.current1_),
+        begin2_(other.begin2_),
+        end2_(other.end2_),
+        current2_(other.current2_),
+        begin3_(other.begin3_),
+        end3_(other.end3_),
+        current3_(other.current3_),
+        begin4_(other.begin4_),
+        end4_(other.end4_),
+        current4_(other.current4_),
+        begin5_(other.begin5_),
+        end5_(other.end5_),
+        current5_(other.current5_),
+        begin6_(other.begin6_),
+        end6_(other.end6_),
+        current6_(other.current6_),
+        begin7_(other.begin7_),
+        end7_(other.end7_),
+        current7_(other.current7_),
+        begin8_(other.begin8_),
+        end8_(other.end8_),
+        current8_(other.current8_),
+        begin9_(other.begin9_),
+        end9_(other.end9_),
+        current9_(other.current9_),
+        begin10_(other.begin10_),
+        end10_(other.end10_),
+        current10_(other.current10_) {
+      ComputeCurrentValue();
+    }
+
+    void ComputeCurrentValue() {
+      if (!AtEnd())
+        current_value_ = ParamType(*current1_, *current2_, *current3_,
+            *current4_, *current5_, *current6_, *current7_, *current8_,
+            *current9_, *current10_);
+    }
+    bool AtEnd() const {
+      // We must report the iterator as past the end of the range as soon as
+      // any of the component iterators has reached the end of its range.
+      return
+          current1_ == end1_ ||
+          current2_ == end2_ ||
+          current3_ == end3_ ||
+          current4_ == end4_ ||
+          current5_ == end5_ ||
+          current6_ == end6_ ||
+          current7_ == end7_ ||
+          current8_ == end8_ ||
+          current9_ == end9_ ||
+          current10_ == end10_;
+    }
+
+    // No implementation - assignment is unsupported.
+    void operator=(const Iterator& other);
+
+    const ParamGeneratorInterface<ParamType>* const base_;
+    // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+    // current[i]_ is the actual traversing iterator.
+    const typename ParamGenerator<T1>::iterator begin1_;
+    const typename ParamGenerator<T1>::iterator end1_;
+    typename ParamGenerator<T1>::iterator current1_;
+    const typename ParamGenerator<T2>::iterator begin2_;
+    const typename ParamGenerator<T2>::iterator end2_;
+    typename ParamGenerator<T2>::iterator current2_;
+    const typename ParamGenerator<T3>::iterator begin3_;
+    const typename ParamGenerator<T3>::iterator end3_;
+    typename ParamGenerator<T3>::iterator current3_;
+    const typename ParamGenerator<T4>::iterator begin4_;
+    const typename ParamGenerator<T4>::iterator end4_;
+    typename ParamGenerator<T4>::iterator current4_;
+    const typename ParamGenerator<T5>::iterator begin5_;
+    const typename ParamGenerator<T5>::iterator end5_;
+    typename ParamGenerator<T5>::iterator current5_;
+    const typename ParamGenerator<T6>::iterator begin6_;
+    const typename ParamGenerator<T6>::iterator end6_;
+    typename ParamGenerator<T6>::iterator current6_;
+    const typename ParamGenerator<T7>::iterator begin7_;
+    const typename ParamGenerator<T7>::iterator end7_;
+    typename ParamGenerator<T7>::iterator current7_;
+    const typename ParamGenerator<T8>::iterator begin8_;
+    const typename ParamGenerator<T8>::iterator end8_;
+    typename ParamGenerator<T8>::iterator current8_;
+    const typename ParamGenerator<T9>::iterator begin9_;
+    const typename ParamGenerator<T9>::iterator end9_;
+    typename ParamGenerator<T9>::iterator current9_;
+    const typename ParamGenerator<T10>::iterator begin10_;
+    const typename ParamGenerator<T10>::iterator end10_;
+    typename ParamGenerator<T10>::iterator current10_;
+    ParamType current_value_;
+  };  // class CartesianProductGenerator10::Iterator
+
+  // No implementation - assignment is unsupported.
+  void operator=(const CartesianProductGenerator10& other);
+
+  const ParamGenerator<T1> g1_;
+  const ParamGenerator<T2> g2_;
+  const ParamGenerator<T3> g3_;
+  const ParamGenerator<T4> g4_;
+  const ParamGenerator<T5> g5_;
+  const ParamGenerator<T6> g6_;
+  const ParamGenerator<T7> g7_;
+  const ParamGenerator<T8> g8_;
+  const ParamGenerator<T9> g9_;
+  const ParamGenerator<T10> g10_;
+};  // class CartesianProductGenerator10
+
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Helper classes providing Combine() with polymorphic features. They allow
+// casting CartesianProductGeneratorN<T> to ParamGenerator<U> if T is
+// convertible to U.
+//
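+// A brief usage sketch of the public Combine() API that these holders back
+// (the fixture AnimalTest and its values below are hypothetical):
+//
+//   class AnimalTest
+//       : public ::testing::TestWithParam<
+//             ::testing::tuple<const char*, int> > {};
+//
+//   TEST_P(AnimalTest, AcceptsParam) {
+//     const char* name = ::testing::get<0>(GetParam());
+//     int legs = ::testing::get<1>(GetParam());
+//     EXPECT_TRUE(name != NULL);   // every generated name is non-NULL
+//     EXPECT_GT(legs, 0);          // every generated leg count is positive
+//   }
+//
+//   INSTANTIATE_TEST_CASE_P(
+//       LegCounts, AnimalTest,
+//       ::testing::Combine(::testing::Values("cat", "dog"),
+//                          ::testing::Values(2, 4)));
+//
+// The last component varies fastest, so the tuples are generated in the
+// order ("cat", 2), ("cat", 4), ("dog", 2), ("dog", 4).
+//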
+template <class Generator1, class Generator2>
+class CartesianProductHolder2 {
+ public:
+  CartesianProductHolder2(const Generator1& g1, const Generator2& g2)
+      : g1_(g1), g2_(g2) {}
+  template <typename T1, typename T2>
+  operator ParamGenerator< ::testing::tuple<T1, T2> >() const {
+    return ParamGenerator< ::testing::tuple<T1, T2> >(
+        new CartesianProductGenerator2<T1, T2>(
+        static_cast<ParamGenerator<T1> >(g1_),
+        static_cast<ParamGenerator<T2> >(g2_)));
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const CartesianProductHolder2& other);
+
+  const Generator1 g1_;
+  const Generator2 g2_;
+};  // class CartesianProductHolder2
+
+template <class Generator1, class Generator2, class Generator3>
+class CartesianProductHolder3 {
+ public:
+  CartesianProductHolder3(const Generator1& g1, const Generator2& g2,
+      const Generator3& g3)
+      : g1_(g1), g2_(g2), g3_(g3) {}
+  template <typename T1, typename T2, typename T3>
+  operator ParamGenerator< ::testing::tuple<T1, T2, T3> >() const {
+    return ParamGenerator< ::testing::tuple<T1, T2, T3> >(
+        new CartesianProductGenerator3<T1, T2, T3>(
+        static_cast<ParamGenerator<T1> >(g1_),
+        static_cast<ParamGenerator<T2> >(g2_),
+        static_cast<ParamGenerator<T3> >(g3_)));
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const CartesianProductHolder3& other);
+
+  const Generator1 g1_;
+  const Generator2 g2_;
+  const Generator3 g3_;
+};  // class CartesianProductHolder3
+
+template <class Generator1, class Generator2, class Generator3,
+    class Generator4>
+class CartesianProductHolder4 {
+ public:
+  CartesianProductHolder4(const Generator1& g1, const Generator2& g2,
+      const Generator3& g3, const Generator4& g4)
+      : g1_(g1), g2_(g2), g3_(g3), g4_(g4) {}
+  template <typename T1, typename T2, typename T3, typename T4>
+  operator ParamGenerator< ::testing::tuple<T1, T2, T3, T4> >() const {
+    return ParamGenerator< ::testing::tuple<T1, T2, T3, T4> >(
+        new CartesianProductGenerator4<T1, T2, T3, T4>(
+        static_cast<ParamGenerator<T1> >(g1_),
+        static_cast<ParamGenerator<T2> >(g2_),
+        static_cast<ParamGenerator<T3> >(g3_),
+        static_cast<ParamGenerator<T4> >(g4_)));
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const CartesianProductHolder4& other);
+
+  const Generator1 g1_;
+  const Generator2 g2_;
+  const Generator3 g3_;
+  const Generator4 g4_;
+};  // class CartesianProductHolder4
+
+template <class Generator1, class Generator2, class Generator3,
+    class Generator4, class Generator5>
+class CartesianProductHolder5 {
+ public:
+  CartesianProductHolder5(const Generator1& g1, const Generator2& g2,
+      const Generator3& g3, const Generator4& g4, const Generator5& g5)
+      : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5) {}
+  template <typename T1, typename T2, typename T3, typename T4, typename T5>
+  operator ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5> >() const {
+    return ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5> >(
+        new CartesianProductGenerator5<T1, T2, T3, T4, T5>(
+        static_cast<ParamGenerator<T1> >(g1_),
+        static_cast<ParamGenerator<T2> >(g2_),
+        static_cast<ParamGenerator<T3> >(g3_),
+        static_cast<ParamGenerator<T4> >(g4_),
+        static_cast<ParamGenerator<T5> >(g5_)));
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const CartesianProductHolder5& other);
+
+  const Generator1 g1_;
+  const Generator2 g2_;
+  const Generator3 g3_;
+  const Generator4 g4_;
+  const Generator5 g5_;
+};  // class CartesianProductHolder5
+
+template <class Generator1, class Generator2, class Generator3,
+    class Generator4, class Generator5, class Generator6>
+class CartesianProductHolder6 {
+ public:
+  CartesianProductHolder6(const Generator1& g1, const Generator2& g2,
+      const Generator3& g3, const Generator4& g4, const Generator5& g5,
+      const Generator6& g6)
+      : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6) {}
+  template <typename T1, typename T2, typename T3, typename T4, typename T5,
+      typename T6>
+  operator ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6> >() const {
+    return ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6> >(
+        new CartesianProductGenerator6<T1, T2, T3, T4, T5, T6>(
+        static_cast<ParamGenerator<T1> >(g1_),
+        static_cast<ParamGenerator<T2> >(g2_),
+        static_cast<ParamGenerator<T3> >(g3_),
+        static_cast<ParamGenerator<T4> >(g4_),
+        static_cast<ParamGenerator<T5> >(g5_),
+        static_cast<ParamGenerator<T6> >(g6_)));
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const CartesianProductHolder6& other);
+
+  const Generator1 g1_;
+  const Generator2 g2_;
+  const Generator3 g3_;
+  const Generator4 g4_;
+  const Generator5 g5_;
+  const Generator6 g6_;
+};  // class CartesianProductHolder6
+
+template <class Generator1, class Generator2, class Generator3,
+    class Generator4, class Generator5, class Generator6, class Generator7>
+class CartesianProductHolder7 {
+ public:
+  CartesianProductHolder7(const Generator1& g1, const Generator2& g2,
+      const Generator3& g3, const Generator4& g4, const Generator5& g5,
+      const Generator6& g6, const Generator7& g7)
+      : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7) {}
+  template <typename T1, typename T2, typename T3, typename T4, typename T5,
+      typename T6, typename T7>
+  operator ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6,
+      T7> >() const {
+    return ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6, T7> >(
+        new CartesianProductGenerator7<T1, T2, T3, T4, T5, T6, T7>(
+        static_cast<ParamGenerator<T1> >(g1_),
+        static_cast<ParamGenerator<T2> >(g2_),
+        static_cast<ParamGenerator<T3> >(g3_),
+        static_cast<ParamGenerator<T4> >(g4_),
+        static_cast<ParamGenerator<T5> >(g5_),
+        static_cast<ParamGenerator<T6> >(g6_),
+        static_cast<ParamGenerator<T7> >(g7_)));
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const CartesianProductHolder7& other);
+
+  const Generator1 g1_;
+  const Generator2 g2_;
+  const Generator3 g3_;
+  const Generator4 g4_;
+  const Generator5 g5_;
+  const Generator6 g6_;
+  const Generator7 g7_;
+};  // class CartesianProductHolder7
+
+template <class Generator1, class Generator2, class Generator3,
+    class Generator4, class Generator5, class Generator6, class Generator7,
+    class Generator8>
+class CartesianProductHolder8 {
+ public:
+  CartesianProductHolder8(const Generator1& g1, const Generator2& g2,
+      const Generator3& g3, const Generator4& g4, const Generator5& g5,
+      const Generator6& g6, const Generator7& g7, const Generator8& g8)
+      : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7),
+          g8_(g8) {}
+  template <typename T1, typename T2, typename T3, typename T4, typename T5,
+      typename T6, typename T7, typename T8>
+  operator ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6, T7,
+      T8> >() const {
+    return ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6, T7, T8> >(
+        new CartesianProductGenerator8<T1, T2, T3, T4, T5, T6, T7, T8>(
+        static_cast<ParamGenerator<T1> >(g1_),
+        static_cast<ParamGenerator<T2> >(g2_),
+        static_cast<ParamGenerator<T3> >(g3_),
+        static_cast<ParamGenerator<T4> >(g4_),
+        static_cast<ParamGenerator<T5> >(g5_),
+        static_cast<ParamGenerator<T6> >(g6_),
+        static_cast<ParamGenerator<T7> >(g7_),
+        static_cast<ParamGenerator<T8> >(g8_)));
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const CartesianProductHolder8& other);
+
+  const Generator1 g1_;
+  const Generator2 g2_;
+  const Generator3 g3_;
+  const Generator4 g4_;
+  const Generator5 g5_;
+  const Generator6 g6_;
+  const Generator7 g7_;
+  const Generator8 g8_;
+};  // class CartesianProductHolder8
+
+template <class Generator1, class Generator2, class Generator3,
+    class Generator4, class Generator5, class Generator6, class Generator7,
+    class Generator8, class Generator9>
+class CartesianProductHolder9 {
+ public:
+  CartesianProductHolder9(const Generator1& g1, const Generator2& g2,
+      const Generator3& g3, const Generator4& g4, const Generator5& g5,
+      const Generator6& g6, const Generator7& g7, const Generator8& g8,
+      const Generator9& g9)
+      : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8),
+          g9_(g9) {}
+  template <typename T1, typename T2, typename T3, typename T4, typename T5,
+      typename T6, typename T7, typename T8, typename T9>
+  operator ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6, T7, T8,
+      T9> >() const {
+    return ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6, T7, T8,
+        T9> >(
+        new CartesianProductGenerator9<T1, T2, T3, T4, T5, T6, T7, T8, T9>(
+        static_cast<ParamGenerator<T1> >(g1_),
+        static_cast<ParamGenerator<T2> >(g2_),
+        static_cast<ParamGenerator<T3> >(g3_),
+        static_cast<ParamGenerator<T4> >(g4_),
+        static_cast<ParamGenerator<T5> >(g5_),
+        static_cast<ParamGenerator<T6> >(g6_),
+        static_cast<ParamGenerator<T7> >(g7_),
+        static_cast<ParamGenerator<T8> >(g8_),
+        static_cast<ParamGenerator<T9> >(g9_)));
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const CartesianProductHolder9& other);
+
+  const Generator1 g1_;
+  const Generator2 g2_;
+  const Generator3 g3_;
+  const Generator4 g4_;
+  const Generator5 g5_;
+  const Generator6 g6_;
+  const Generator7 g7_;
+  const Generator8 g8_;
+  const Generator9 g9_;
+};  // class CartesianProductHolder9
+
+template <class Generator1, class Generator2, class Generator3,
+    class Generator4, class Generator5, class Generator6, class Generator7,
+    class Generator8, class Generator9, class Generator10>
+class CartesianProductHolder10 {
+ public:
+  CartesianProductHolder10(const Generator1& g1, const Generator2& g2,
+      const Generator3& g3, const Generator4& g4, const Generator5& g5,
+      const Generator6& g6, const Generator7& g7, const Generator8& g8,
+      const Generator9& g9, const Generator10& g10)
+      : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8),
+          g9_(g9), g10_(g10) {}
+  template <typename T1, typename T2, typename T3, typename T4, typename T5,
+      typename T6, typename T7, typename T8, typename T9, typename T10>
+  operator ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9,
+      T10> >() const {
+    return ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9,
+        T10> >(
+        new CartesianProductGenerator10<T1, T2, T3, T4, T5, T6, T7, T8, T9,
+            T10>(
+        static_cast<ParamGenerator<T1> >(g1_),
+        static_cast<ParamGenerator<T2> >(g2_),
+        static_cast<ParamGenerator<T3> >(g3_),
+        static_cast<ParamGenerator<T4> >(g4_),
+        static_cast<ParamGenerator<T5> >(g5_),
+        static_cast<ParamGenerator<T6> >(g6_),
+        static_cast<ParamGenerator<T7> >(g7_),
+        static_cast<ParamGenerator<T8> >(g8_),
+        static_cast<ParamGenerator<T9> >(g9_),
+        static_cast<ParamGenerator<T10> >(g10_)));
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const CartesianProductHolder10& other);
+
+  const Generator1 g1_;
+  const Generator2 g2_;
+  const Generator3 g3_;
+  const Generator4 g4_;
+  const Generator5 g5_;
+  const Generator6 g6_;
+  const Generator7 g7_;
+  const Generator8 g8_;
+  const Generator9 g9_;
+  const Generator10 g10_;
+};  // class CartesianProductHolder10
+
+# endif  // GTEST_HAS_COMBINE
+
+}  // namespace internal
+}  // namespace testing
+
+#endif  //  GTEST_HAS_PARAM_TEST
+
+#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_
+
+#if GTEST_HAS_PARAM_TEST
+
+namespace testing {
+
+// Functions producing parameter generators.
+//
+// Google Test uses these generators to produce parameters for value-
+// parameterized tests. When a parameterized test case is instantiated
+// with a particular generator, Google Test creates and runs tests
+// for each element in the sequence produced by the generator.
+//
+// In the following sample, the tests from test case FooTest are each
+// instantiated three times, with parameter values 3, 5, and 8:
+//
+// class FooTest : public TestWithParam<int> { ... };
+//
+// TEST_P(FooTest, TestThis) {
+// }
+// TEST_P(FooTest, TestThat) {
+// }
+// INSTANTIATE_TEST_CASE_P(TestSequence, FooTest, Values(3, 5, 8));
+//
+
+// Range() returns generators providing sequences of values in a range.
+//
+// Synopsis:
+// Range(start, end)
+//   - returns a generator producing the sequence of values {start, start+1,
+//     start+2, ...}.
+// Range(start, end, step)
+//   - returns a generator producing the sequence of values {start, start+step,
+//     start+2*step, ...}.
+// Notes:
+//   * The generated sequences never include end. For example, Range(1, 5)
+//     returns a generator producing a sequence {1, 2, 3, 4}. Range(1, 9, 2)
+//     returns a generator producing {1, 3, 5, 7}.
+//   * start and end must have the same type. That type may be any integral or
+//     floating-point type, or a user-defined type satisfying these conditions:
+//     * It must be assignable (have operator=() defined).
+//     * It must have operator+() defined (for the two-argument Range(), it
+//       must accept an int-compatible step).
+//     * It must have operator<() defined.
+//     Elements of the resulting sequences will also have that type.
+//   * The condition start < end must hold for the resulting sequences to
+//     contain any elements.
+//
+template <typename T, typename IncrementT>
+internal::ParamGenerator<T> Range(T start, T end, IncrementT step) {
+  return internal::ParamGenerator<T>(
+      new internal::RangeGenerator<T, IncrementT>(start, end, step));
+}
+
+template <typename T>
+internal::ParamGenerator<T> Range(T start, T end) {
+  return Range(start, end, 1);
+}
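+
+// A minimal sketch (StepTest and the assertion below are illustrative, not
+// part of Google Test): Range(1, 9, 2) produces {1, 3, 5, 7}, so each TEST_P
+// in StepTest runs four times, once per odd value.
+//
+// class StepTest : public ::testing::TestWithParam<int> {};
+//
+// TEST_P(StepTest, IsOdd) {
+//   EXPECT_EQ(1, GetParam() % 2);
+// }
+//
+// INSTANTIATE_TEST_CASE_P(OddValues, StepTest, Range(1, 9, 2));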
+
+// The ValuesIn() function allows tests to be generated with parameters coming
+// from a container.
+//
+// Synopsis:
+// ValuesIn(const T (&array)[N])
+//   - returns a generator producing sequences with elements from
+//     a C-style array.
+// ValuesIn(const Container& container)
+//   - returns a generator producing sequences with elements from
+//     an STL-style container.
+// ValuesIn(Iterator begin, Iterator end)
+//   - returns a generator producing sequences with elements from
+//     a range [begin, end) defined by a pair of STL-style iterators. These
+//     iterators can also be plain C pointers.
+//
+// Please note that ValuesIn() copies the values from the container passed in
+// and keeps them in order to generate tests in RUN_ALL_TESTS().
+//
+// Examples:
+//
+// This instantiates tests from test case StringTest
+// each with C-string values of "foo", "bar", and "baz":
+//
+// const char* strings[] = {"foo", "bar", "baz"};
+// INSTANTIATE_TEST_CASE_P(StringSequence, StringTest, ValuesIn(strings));
+//
+// This instantiates tests from test case StlStringTest
+// each with the STL string values "a" and "b":
+//
+// ::std::vector< ::std::string> GetParameterStrings() {
+//   ::std::vector< ::std::string> v;
+//   v.push_back("a");
+//   v.push_back("b");
+//   return v;
+// }
+//
+// INSTANTIATE_TEST_CASE_P(CharSequence,
+//                         StlStringTest,
+//                         ValuesIn(GetParameterStrings()));
+//
+//
+// This will also instantiate tests from CharTest
+// each with parameter values 'a' and 'b':
+//
+// ::std::list<char> GetParameterChars() {
+//   ::std::list<char> list;
+//   list.push_back('a');
+//   list.push_back('b');
+//   return list;
+// }
+// ::std::list<char> l = GetParameterChars();
+// INSTANTIATE_TEST_CASE_P(CharSequence2,
+//                         CharTest,
+//                         ValuesIn(l.begin(), l.end()));
+//
+template <typename ForwardIterator>
+internal::ParamGenerator<
+  typename ::testing::internal::IteratorTraits<ForwardIterator>::value_type>
+ValuesIn(ForwardIterator begin, ForwardIterator end) {
+  typedef typename ::testing::internal::IteratorTraits<ForwardIterator>
+      ::value_type ParamType;
+  return internal::ParamGenerator<ParamType>(
+      new internal::ValuesInIteratorRangeGenerator<ParamType>(begin, end));
+}
+
+template <typename T, size_t N>
+internal::ParamGenerator<T> ValuesIn(const T (&array)[N]) {
+  return ValuesIn(array, array + N);
+}
+
+template <class Container>
+internal::ParamGenerator<typename Container::value_type> ValuesIn(
+    const Container& container) {
+  return ValuesIn(container.begin(), container.end());
+}
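+
+// A short sketch illustrating the note above about copying (GetPorts(),
+// PortTest, and the port numbers are illustrative): the container returned by
+// GetPorts() is a temporary, which is safe because ValuesIn() copies its
+// elements.
+//
+// ::std::vector<int> GetPorts() {
+//   ::std::vector<int> v;
+//   v.push_back(80);
+//   v.push_back(443);
+//   return v;
+// }
+//
+// class PortTest : public ::testing::TestWithParam<int> {};
+//
+// TEST_P(PortTest, IsPositive) {
+//   EXPECT_GT(GetParam(), 0);
+// }
+//
+// INSTANTIATE_TEST_CASE_P(WellKnownPorts, PortTest, ValuesIn(GetPorts()));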
+
+// Values() allows generating tests from an explicitly specified list of
+// parameters.
+//
+// Synopsis:
+// Values(T v1, T v2, ..., T vN)
+//   - returns a generator producing sequences with elements v1, v2, ..., vN.
+//
+// For example, this instantiates tests from test case BarTest each
+// with values "one", "two", and "three":
+//
+// INSTANTIATE_TEST_CASE_P(NumSequence, BarTest, Values("one", "two", "three"));
+//
+// This instantiates tests from test case BazTest each with values 1, 2, and
+// 3.5. The exact type of the values will depend on the parameter type of
+// BazTest.
+//
+// INSTANTIATE_TEST_CASE_P(FloatingNumbers, BazTest, Values(1, 2, 3.5));
+//
+// Currently, Values() supports from 1 to 50 parameters.
+//
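+// A minimal sketch of the BazTest example above, assuming its parameter type
+// is double (the assertion is illustrative only): each argument is converted
+// to the parameter type, so Values(1, 2, 3.5) yields 1.0, 2.0, and 3.5.
+//
+// class BazTest : public ::testing::TestWithParam<double> {};
+//
+// TEST_P(BazTest, IsPositive) {
+//   EXPECT_GT(GetParam(), 0.0);
+// }
+//
+// INSTANTIATE_TEST_CASE_P(FloatingNumbers, BazTest, Values(1, 2, 3.5));
+//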
+template <typename T1>
+internal::ValueArray1<T1> Values(T1 v1) {
+  return internal::ValueArray1<T1>(v1);
+}
+
+template <typename T1, typename T2>
+internal::ValueArray2<T1, T2> Values(T1 v1, T2 v2) {
+  return internal::ValueArray2<T1, T2>(v1, v2);
+}
+
+template <typename T1, typename T2, typename T3>
+internal::ValueArray3<T1, T2, T3> Values(T1 v1, T2 v2, T3 v3) {
+  return internal::ValueArray3<T1, T2, T3>(v1, v2, v3);
+}
+
+template <typename T1, typename T2, typename T3, typename T4>
+internal::ValueArray4<T1, T2, T3, T4> Values(T1 v1, T2 v2, T3 v3, T4 v4) {
+  return internal::ValueArray4<T1, T2, T3, T4>(v1, v2, v3, v4);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+internal::ValueArray5<T1, T2, T3, T4, T5> Values(T1 v1, T2 v2, T3 v3, T4 v4,
+    T5 v5) {
+  return internal::ValueArray5<T1, T2, T3, T4, T5>(v1, v2, v3, v4, v5);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6>
+internal::ValueArray6<T1, T2, T3, T4, T5, T6> Values(T1 v1, T2 v2, T3 v3,
+    T4 v4, T5 v5, T6 v6) {
+  return internal::ValueArray6<T1, T2, T3, T4, T5, T6>(v1, v2, v3, v4, v5, v6);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7>
+internal::ValueArray7<T1, T2, T3, T4, T5, T6, T7> Values(T1 v1, T2 v2, T3 v3,
+    T4 v4, T5 v5, T6 v6, T7 v7) {
+  return internal::ValueArray7<T1, T2, T3, T4, T5, T6, T7>(v1, v2, v3, v4, v5,
+      v6, v7);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8>
+internal::ValueArray8<T1, T2, T3, T4, T5, T6, T7, T8> Values(T1 v1, T2 v2,
+    T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8) {
+  return internal::ValueArray8<T1, T2, T3, T4, T5, T6, T7, T8>(v1, v2, v3, v4,
+      v5, v6, v7, v8);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9>
+internal::ValueArray9<T1, T2, T3, T4, T5, T6, T7, T8, T9> Values(T1 v1, T2 v2,
+    T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9) {
+  return internal::ValueArray9<T1, T2, T3, T4, T5, T6, T7, T8, T9>(v1, v2, v3,
+      v4, v5, v6, v7, v8, v9);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10>
+internal::ValueArray10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> Values(T1 v1,
+    T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10) {
+  return internal::ValueArray10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>(v1,
+      v2, v3, v4, v5, v6, v7, v8, v9, v10);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11>
+internal::ValueArray11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10,
+    T11> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+    T10 v10, T11 v11) {
+  return internal::ValueArray11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10,
+      T11>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12>
+internal::ValueArray12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+    T12> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+    T10 v10, T11 v11, T12 v12) {
+  return internal::ValueArray12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13>
+internal::ValueArray13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+    T13> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+    T10 v10, T11 v11, T12 v12, T13 v13) {
+  return internal::ValueArray13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12, T13>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14>
+internal::ValueArray14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    T14> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+    T10 v10, T11 v11, T12 v12, T13 v13, T14 v14) {
+  return internal::ValueArray14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12, T13, T14>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13,
+      v14);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15>
+internal::ValueArray15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    T14, T15> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8,
+    T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15) {
+  return internal::ValueArray15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12, T13, T14, T15>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12,
+      v13, v14, v15);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16>
+internal::ValueArray16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    T14, T15, T16> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
+    T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+    T16 v16) {
+  return internal::ValueArray16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12, T13, T14, T15, T16>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11,
+      v12, v13, v14, v15, v16);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17>
+internal::ValueArray17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    T14, T15, T16, T17> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
+    T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+    T16 v16, T17 v17) {
+  return internal::ValueArray17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12, T13, T14, T15, T16, T17>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10,
+      v11, v12, v13, v14, v15, v16, v17);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18>
+internal::ValueArray18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    T14, T15, T16, T17, T18> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6,
+    T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+    T16 v16, T17 v17, T18 v18) {
+  return internal::ValueArray18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12, T13, T14, T15, T16, T17, T18>(v1, v2, v3, v4, v5, v6, v7, v8, v9,
+      v10, v11, v12, v13, v14, v15, v16, v17, v18);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19>
+internal::ValueArray19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    T14, T15, T16, T17, T18, T19> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5,
+    T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14,
+    T15 v15, T16 v16, T17 v17, T18 v18, T19 v19) {
+  return internal::ValueArray19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12, T13, T14, T15, T16, T17, T18, T19>(v1, v2, v3, v4, v5, v6, v7, v8,
+      v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20>
+internal::ValueArray20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    T14, T15, T16, T17, T18, T19, T20> Values(T1 v1, T2 v2, T3 v3, T4 v4,
+    T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,
+    T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20) {
+  return internal::ValueArray20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12, T13, T14, T15, T16, T17, T18, T19, T20>(v1, v2, v3, v4, v5, v6, v7,
+      v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21>
+internal::ValueArray21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    T14, T15, T16, T17, T18, T19, T20, T21> Values(T1 v1, T2 v2, T3 v3, T4 v4,
+    T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,
+    T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21) {
+  return internal::ValueArray21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21>(v1, v2, v3, v4, v5, v6,
+      v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22>
+internal::ValueArray22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    T14, T15, T16, T17, T18, T19, T20, T21, T22> Values(T1 v1, T2 v2, T3 v3,
+    T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
+    T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
+    T21 v21, T22 v22) {
+  return internal::ValueArray22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22>(v1, v2, v3, v4,
+      v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
+      v20, v21, v22);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23>
+internal::ValueArray23<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23> Values(T1 v1, T2 v2,
+    T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
+    T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
+    T21 v21, T22 v22, T23 v23) {
+  return internal::ValueArray23<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23>(v1, v2, v3,
+      v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
+      v20, v21, v22, v23);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24>
+internal::ValueArray24<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24> Values(T1 v1, T2 v2,
+    T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
+    T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
+    T21 v21, T22 v22, T23 v23, T24 v24) {
+  return internal::ValueArray24<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>(v1, v2,
+      v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18,
+      v19, v20, v21, v22, v23, v24);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25>
+internal::ValueArray25<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> Values(T1 v1,
+    T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11,
+    T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19,
+    T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25) {
+  return internal::ValueArray25<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25>(v1,
+      v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17,
+      v18, v19, v20, v21, v22, v23, v24, v25);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26>
+internal::ValueArray26<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+    T26> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+    T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+    T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+    T26 v26) {
+  return internal::ValueArray26<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+      T26>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15,
+      v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27>
+internal::ValueArray27<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+    T27> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+    T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+    T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+    T26 v26, T27 v27) {
+  return internal::ValueArray27<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+      T26, T27>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14,
+      v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28>
+internal::ValueArray28<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+    T28> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+    T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+    T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+    T26 v26, T27 v27, T28 v28) {
+  return internal::ValueArray28<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+      T26, T27, T28>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13,
+      v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27,
+      v28);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29>
+internal::ValueArray29<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+    T29> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+    T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+    T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+    T26 v26, T27 v27, T28 v28, T29 v29) {
+  return internal::ValueArray29<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+      T26, T27, T28, T29>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12,
+      v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26,
+      v27, v28, v29);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30>
+internal::ValueArray30<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+    T29, T30> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8,
+    T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16,
+    T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24,
+    T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30) {
+  return internal::ValueArray30<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+      T26, T27, T28, T29, T30>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11,
+      v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25,
+      v26, v27, v28, v29, v30);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31>
+internal::ValueArray31<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+    T29, T30, T31> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
+    T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+    T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
+    T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31) {
+  return internal::ValueArray31<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+      T26, T27, T28, T29, T30, T31>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10,
+      v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24,
+      v25, v26, v27, v28, v29, v30, v31);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32>
+internal::ValueArray32<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+    T29, T30, T31, T32> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
+    T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+    T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
+    T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,
+    T32 v32) {
+  return internal::ValueArray32<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+      T26, T27, T28, T29, T30, T31, T32>(v1, v2, v3, v4, v5, v6, v7, v8, v9,
+      v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23,
+      v24, v25, v26, v27, v28, v29, v30, v31, v32);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33>
+internal::ValueArray33<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+    T29, T30, T31, T32, T33> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6,
+    T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+    T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
+    T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,
+    T32 v32, T33 v33) {
+  return internal::ValueArray33<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+      T26, T27, T28, T29, T30, T31, T32, T33>(v1, v2, v3, v4, v5, v6, v7, v8,
+      v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23,
+      v24, v25, v26, v27, v28, v29, v30, v31, v32, v33);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34>
+internal::ValueArray34<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+    T29, T30, T31, T32, T33, T34> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5,
+    T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14,
+    T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22,
+    T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30,
+    T31 v31, T32 v32, T33 v33, T34 v34) {
+  return internal::ValueArray34<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+      T26, T27, T28, T29, T30, T31, T32, T33, T34>(v1, v2, v3, v4, v5, v6, v7,
+      v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22,
+      v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35>
+internal::ValueArray35<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+    T29, T30, T31, T32, T33, T34, T35> Values(T1 v1, T2 v2, T3 v3, T4 v4,
+    T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,
+    T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21,
+    T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29,
+    T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35) {
+  return internal::ValueArray35<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35>(v1, v2, v3, v4, v5, v6,
+      v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21,
+      v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36>
+internal::ValueArray36<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+    T29, T30, T31, T32, T33, T34, T35, T36> Values(T1 v1, T2 v2, T3 v3, T4 v4,
+    T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,
+    T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21,
+    T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29,
+    T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36) {
+  return internal::ValueArray36<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36>(v1, v2, v3, v4,
+      v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
+      v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33,
+      v34, v35, v36);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37>
+internal::ValueArray37<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+    T29, T30, T31, T32, T33, T34, T35, T36, T37> Values(T1 v1, T2 v2, T3 v3,
+    T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
+    T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
+    T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28,
+    T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36,
+    T37 v37) {
+  return internal::ValueArray37<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37>(v1, v2, v3,
+      v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
+      v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33,
+      v34, v35, v36, v37);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38>
+internal::ValueArray38<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38> Values(T1 v1, T2 v2,
+    T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
+    T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
+    T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28,
+    T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36,
+    T37 v37, T38 v38) {
+  return internal::ValueArray38<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38>(v1, v2,
+      v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18,
+      v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32,
+      v33, v34, v35, v36, v37, v38);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39>
+internal::ValueArray39<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> Values(T1 v1, T2 v2,
+    T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
+    T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
+    T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28,
+    T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36,
+    T37 v37, T38 v38, T39 v39) {
+  return internal::ValueArray39<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39>(v1,
+      v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17,
+      v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31,
+      v32, v33, v34, v35, v36, v37, v38, v39);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40>
+internal::ValueArray40<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40> Values(T1 v1,
+    T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11,
+    T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19,
+    T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27,
+    T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35,
+    T36 v36, T37 v37, T38 v38, T39 v39, T40 v40) {
+  return internal::ValueArray40<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+      T40>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15,
+      v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29,
+      v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41>
+internal::ValueArray41<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+    T41> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+    T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+    T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+    T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+    T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41) {
+  return internal::ValueArray41<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+      T40, T41>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14,
+      v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28,
+      v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42>
+internal::ValueArray42<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+    T42> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+    T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+    T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+    T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+    T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+    T42 v42) {
+  return internal::ValueArray42<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+      T40, T41, T42>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13,
+      v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27,
+      v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41,
+      v42);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42, typename T43>
+internal::ValueArray43<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+    T43> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+    T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+    T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+    T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+    T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+    T42 v42, T43 v43) {
+  return internal::ValueArray43<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+      T40, T41, T42, T43>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12,
+      v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26,
+      v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40,
+      v41, v42, v43);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42, typename T43, typename T44>
+internal::ValueArray44<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+    T44> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+    T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+    T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+    T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+    T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+    T42 v42, T43 v43, T44 v44) {
+  return internal::ValueArray44<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+      T40, T41, T42, T43, T44>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11,
+      v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25,
+      v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39,
+      v40, v41, v42, v43, v44);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42, typename T43, typename T44, typename T45>
+internal::ValueArray45<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+    T44, T45> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8,
+    T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16,
+    T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24,
+    T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32,
+    T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40,
+    T41 v41, T42 v42, T43 v43, T44 v44, T45 v45) {
+  return internal::ValueArray45<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+      T40, T41, T42, T43, T44, T45>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10,
+      v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24,
+      v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38,
+      v39, v40, v41, v42, v43, v44, v45);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42, typename T43, typename T44, typename T45,
+    typename T46>
+internal::ValueArray46<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+    T44, T45, T46> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
+    T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+    T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
+    T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,
+    T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39,
+    T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46) {
+  return internal::ValueArray46<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+      T40, T41, T42, T43, T44, T45, T46>(v1, v2, v3, v4, v5, v6, v7, v8, v9,
+      v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23,
+      v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37,
+      v38, v39, v40, v41, v42, v43, v44, v45, v46);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42, typename T43, typename T44, typename T45,
+    typename T46, typename T47>
+internal::ValueArray47<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+    T44, T45, T46, T47> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
+    T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+    T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
+    T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,
+    T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39,
+    T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47) {
+  return internal::ValueArray47<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+      T40, T41, T42, T43, T44, T45, T46, T47>(v1, v2, v3, v4, v5, v6, v7, v8,
+      v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23,
+      v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37,
+      v38, v39, v40, v41, v42, v43, v44, v45, v46, v47);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42, typename T43, typename T44, typename T45,
+    typename T46, typename T47, typename T48>
+internal::ValueArray48<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+    T44, T45, T46, T47, T48> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6,
+    T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+    T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
+    T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,
+    T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39,
+    T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47,
+    T48 v48) {
+  return internal::ValueArray48<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+      T40, T41, T42, T43, T44, T45, T46, T47, T48>(v1, v2, v3, v4, v5, v6, v7,
+      v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22,
+      v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36,
+      v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42, typename T43, typename T44, typename T45,
+    typename T46, typename T47, typename T48, typename T49>
+internal::ValueArray49<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+    T44, T45, T46, T47, T48, T49> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5,
+    T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14,
+    T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22,
+    T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30,
+    T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38,
+    T39 v39, T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46,
+    T47 v47, T48 v48, T49 v49) {
+  return internal::ValueArray49<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+      T40, T41, T42, T43, T44, T45, T46, T47, T48, T49>(v1, v2, v3, v4, v5, v6,
+      v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21,
+      v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35,
+      v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42, typename T43, typename T44, typename T45,
+    typename T46, typename T47, typename T48, typename T49, typename T50>
+internal::ValueArray50<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+    T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+    T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+    T44, T45, T46, T47, T48, T49, T50> Values(T1 v1, T2 v2, T3 v3, T4 v4,
+    T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,
+    T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21,
+    T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29,
+    T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37,
+    T38 v38, T39 v39, T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45,
+    T46 v46, T47 v47, T48 v48, T49 v49, T50 v50) {
+  return internal::ValueArray50<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+      T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+      T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+      T40, T41, T42, T43, T44, T45, T46, T47, T48, T49, T50>(v1, v2, v3, v4,
+      v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
+      v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33,
+      v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47,
+      v48, v49, v50);
+}
+
+// Bool() allows generating tests with parameters in the set {false, true}.
+//
+// Synopsis:
+// Bool()
+//   - returns a generator producing sequences with elements {false, true}.
+//
+// It is useful when testing code that depends on Boolean flags. Combinations
+// of multiple flags can be tested when several Bool()'s are combined using
+// the Combine() function.
+//
+// In the following example all tests in the test case FlagDependentTest
+// will be instantiated twice with parameters false and true.
+//
+// class FlagDependentTest : public testing::TestWithParam<bool> {
+//   virtual void SetUp() {
+//     external_flag = GetParam();
+//   }
+// }
+// INSTANTIATE_TEST_CASE_P(BoolSequence, FlagDependentTest, Bool());
+//
+inline internal::ParamGenerator<bool> Bool() {
+  return Values(false, true);
+}
+
+# if GTEST_HAS_COMBINE
+// Combine() allows the user to combine two or more sequences to produce
+// values of a Cartesian product of those sequences' elements.
+//
+// Synopsis:
+// Combine(gen1, gen2, ..., genN)
+//   - returns a generator producing sequences with elements coming from
+//     the Cartesian product of elements from the sequences generated by
+//     gen1, gen2, ..., genN. The sequence elements will have a type of
+//     tuple<T1, T2, ..., TN> where T1, T2, ..., TN are the types
+//     of elements from the sequences produced by gen1, gen2, ..., genN.
+//
+// Combine can have up to 10 arguments. This number is currently limited
+// by the maximum number of elements in the tuple implementation used by Google
+// Test.
+//
+// Example:
+//
+// This will instantiate the tests in test case AnimalTest with each of
+// the parameter values tuple("cat", BLACK), tuple("cat", WHITE),
+// tuple("dog", BLACK), and tuple("dog", WHITE):
+//
+// enum Color { BLACK, GRAY, WHITE };
+// class AnimalTest
+//     : public testing::TestWithParam<tuple<const char*, Color> > {...};
+//
+// TEST_P(AnimalTest, AnimalLooksNice) {...}
+//
+// INSTANTIATE_TEST_CASE_P(AnimalVariations, AnimalTest,
+//                         Combine(Values("cat", "dog"),
+//                                 Values(BLACK, WHITE)));
+//
+// This will instantiate tests in FlagDependentTest with all variations of two
+// Boolean flags:
+//
+// class FlagDependentTest
+//     : public testing::TestWithParam<tuple<bool, bool> > {
+//   virtual void SetUp() {
+//     // Assigns external_flag_1 and external_flag_2 values from the tuple.
+//     tie(external_flag_1, external_flag_2) = GetParam();
+//   }
+// };
+//
+// TEST_P(FlagDependentTest, TestFeature1) {
+//   // Test your code using external_flag_1 and external_flag_2 here.
+// }
+// INSTANTIATE_TEST_CASE_P(TwoBoolSequence, FlagDependentTest,
+//                         Combine(Bool(), Bool()));
+//
+template <typename Generator1, typename Generator2>
+internal::CartesianProductHolder2<Generator1, Generator2> Combine(
+    const Generator1& g1, const Generator2& g2) {
+  return internal::CartesianProductHolder2<Generator1, Generator2>(
+      g1, g2);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3>
+internal::CartesianProductHolder3<Generator1, Generator2, Generator3> Combine(
+    const Generator1& g1, const Generator2& g2, const Generator3& g3) {
+  return internal::CartesianProductHolder3<Generator1, Generator2, Generator3>(
+      g1, g2, g3);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3,
+    typename Generator4>
+internal::CartesianProductHolder4<Generator1, Generator2, Generator3,
+    Generator4> Combine(
+    const Generator1& g1, const Generator2& g2, const Generator3& g3,
+        const Generator4& g4) {
+  return internal::CartesianProductHolder4<Generator1, Generator2, Generator3,
+      Generator4>(
+      g1, g2, g3, g4);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3,
+    typename Generator4, typename Generator5>
+internal::CartesianProductHolder5<Generator1, Generator2, Generator3,
+    Generator4, Generator5> Combine(
+    const Generator1& g1, const Generator2& g2, const Generator3& g3,
+        const Generator4& g4, const Generator5& g5) {
+  return internal::CartesianProductHolder5<Generator1, Generator2, Generator3,
+      Generator4, Generator5>(
+      g1, g2, g3, g4, g5);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3,
+    typename Generator4, typename Generator5, typename Generator6>
+internal::CartesianProductHolder6<Generator1, Generator2, Generator3,
+    Generator4, Generator5, Generator6> Combine(
+    const Generator1& g1, const Generator2& g2, const Generator3& g3,
+        const Generator4& g4, const Generator5& g5, const Generator6& g6) {
+  return internal::CartesianProductHolder6<Generator1, Generator2, Generator3,
+      Generator4, Generator5, Generator6>(
+      g1, g2, g3, g4, g5, g6);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3,
+    typename Generator4, typename Generator5, typename Generator6,
+    typename Generator7>
+internal::CartesianProductHolder7<Generator1, Generator2, Generator3,
+    Generator4, Generator5, Generator6, Generator7> Combine(
+    const Generator1& g1, const Generator2& g2, const Generator3& g3,
+        const Generator4& g4, const Generator5& g5, const Generator6& g6,
+        const Generator7& g7) {
+  return internal::CartesianProductHolder7<Generator1, Generator2, Generator3,
+      Generator4, Generator5, Generator6, Generator7>(
+      g1, g2, g3, g4, g5, g6, g7);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3,
+    typename Generator4, typename Generator5, typename Generator6,
+    typename Generator7, typename Generator8>
+internal::CartesianProductHolder8<Generator1, Generator2, Generator3,
+    Generator4, Generator5, Generator6, Generator7, Generator8> Combine(
+    const Generator1& g1, const Generator2& g2, const Generator3& g3,
+        const Generator4& g4, const Generator5& g5, const Generator6& g6,
+        const Generator7& g7, const Generator8& g8) {
+  return internal::CartesianProductHolder8<Generator1, Generator2, Generator3,
+      Generator4, Generator5, Generator6, Generator7, Generator8>(
+      g1, g2, g3, g4, g5, g6, g7, g8);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3,
+    typename Generator4, typename Generator5, typename Generator6,
+    typename Generator7, typename Generator8, typename Generator9>
+internal::CartesianProductHolder9<Generator1, Generator2, Generator3,
+    Generator4, Generator5, Generator6, Generator7, Generator8,
+    Generator9> Combine(
+    const Generator1& g1, const Generator2& g2, const Generator3& g3,
+        const Generator4& g4, const Generator5& g5, const Generator6& g6,
+        const Generator7& g7, const Generator8& g8, const Generator9& g9) {
+  return internal::CartesianProductHolder9<Generator1, Generator2, Generator3,
+      Generator4, Generator5, Generator6, Generator7, Generator8, Generator9>(
+      g1, g2, g3, g4, g5, g6, g7, g8, g9);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3,
+    typename Generator4, typename Generator5, typename Generator6,
+    typename Generator7, typename Generator8, typename Generator9,
+    typename Generator10>
+internal::CartesianProductHolder10<Generator1, Generator2, Generator3,
+    Generator4, Generator5, Generator6, Generator7, Generator8, Generator9,
+    Generator10> Combine(
+    const Generator1& g1, const Generator2& g2, const Generator3& g3,
+        const Generator4& g4, const Generator5& g5, const Generator6& g6,
+        const Generator7& g7, const Generator8& g8, const Generator9& g9,
+        const Generator10& g10) {
+  return internal::CartesianProductHolder10<Generator1, Generator2, Generator3,
+      Generator4, Generator5, Generator6, Generator7, Generator8, Generator9,
+      Generator10>(
+      g1, g2, g3, g4, g5, g6, g7, g8, g9, g10);
+}
+# endif  // GTEST_HAS_COMBINE
+
+
+
+# define TEST_P(test_case_name, test_name) \
+  class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) \
+      : public test_case_name { \
+   public: \
+    GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {} \
+    virtual void TestBody(); \
+   private: \
+    static int AddToRegistry() { \
+      ::testing::UnitTest::GetInstance()->parameterized_test_registry(). \
+          GetTestCasePatternHolder<test_case_name>(\
+              #test_case_name, \
+              ::testing::internal::CodeLocation(\
+                  __FILE__, __LINE__))->AddTestPattern(\
+                      #test_case_name, \
+                      #test_name, \
+                      new ::testing::internal::TestMetaFactory< \
+                          GTEST_TEST_CLASS_NAME_(\
+                              test_case_name, test_name)>()); \
+      return 0; \
+    } \
+    static int gtest_registering_dummy_ GTEST_ATTRIBUTE_UNUSED_; \
+    GTEST_DISALLOW_COPY_AND_ASSIGN_(\
+        GTEST_TEST_CLASS_NAME_(test_case_name, test_name)); \
+  }; \
+  int GTEST_TEST_CLASS_NAME_(test_case_name, \
+                             test_name)::gtest_registering_dummy_ = \
+      GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::AddToRegistry(); \
+  void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::TestBody()
+
+// The optional last argument to INSTANTIATE_TEST_CASE_P allows the user
+// to specify a function or functor that generates custom test name suffixes
+// based on the test parameters. The function should accept one argument of
+// type testing::TestParamInfo<class ParamType>, and return std::string.
+//
+// testing::PrintToStringParamName is a builtin test suffix generator that
+// returns the value of testing::PrintToString(GetParam()). It does not work
+// for std::string or C strings.
+//
+// Note: test names must be non-empty, unique, and may only contain ASCII
+// alphanumeric characters or underscore.
+
+# define INSTANTIATE_TEST_CASE_P(prefix, test_case_name, generator, ...) \
+  ::testing::internal::ParamGenerator<test_case_name::ParamType> \
+      gtest_##prefix##test_case_name##_EvalGenerator_() { return generator; } \
+  ::std::string gtest_##prefix##test_case_name##_EvalGenerateName_( \
+      const ::testing::TestParamInfo<test_case_name::ParamType>& info) { \
+    return ::testing::internal::GetParamNameGen<test_case_name::ParamType> \
+        (__VA_ARGS__)(info); \
+  } \
+  int gtest_##prefix##test_case_name##_dummy_ GTEST_ATTRIBUTE_UNUSED_ = \
+      ::testing::UnitTest::GetInstance()->parameterized_test_registry(). \
+          GetTestCasePatternHolder<test_case_name>(\
+              #test_case_name, \
+              ::testing::internal::CodeLocation(\
+                  __FILE__, __LINE__))->AddTestCaseInstantiation(\
+                      #prefix, \
+                      &gtest_##prefix##test_case_name##_EvalGenerator_, \
+                      &gtest_##prefix##test_case_name##_EvalGenerateName_, \
+                      __FILE__, __LINE__)
+
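+// Illustrative usage sketch (not part of this header; the fixture, test, and
+// generator names below are hypothetical) tying together TEST_P,
+// INSTANTIATE_TEST_CASE_P, Values(), and the optional custom name generator
+// described above:
+//
+//   class WidthTest : public testing::TestWithParam<int> {};
+//
+//   TEST_P(WidthTest, IsPositive) {
+//     EXPECT_GT(GetParam(), 0);
+//   }
+//
+//   std::string WidthName(const testing::TestParamInfo<int>& info) {
+//     return "Width" + testing::PrintToString(info.param);
+//   }
+//
+//   INSTANTIATE_TEST_CASE_P(Common, WidthTest,
+//                           testing::Values(1, 2, 4, 8),
+//                           WidthName);
+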
+}  // namespace testing
+
+#endif  // GTEST_HAS_PARAM_TEST
+
+#endif  // GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
+// Copyright 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// Google C++ Testing Framework definitions useful in production code.
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_PROD_H_
+#define GTEST_INCLUDE_GTEST_GTEST_PROD_H_
+
+// When you need to test the private or protected members of a class,
+// use the FRIEND_TEST macro to declare your tests as friends of the
+// class.  For example:
+//
+// class MyClass {
+//  private:
+//   void MyMethod();
+//   FRIEND_TEST(MyClassTest, MyMethod);
+// };
+//
+// class MyClassTest : public testing::Test {
+//   // ...
+// };
+//
+// TEST_F(MyClassTest, MyMethod) {
+//   // Can call MyClass::MyMethod() here.
+// }
+
+#define FRIEND_TEST(test_case_name, test_name)\
+friend class test_case_name##_##test_name##_Test
+
+#endif  // GTEST_INCLUDE_GTEST_GTEST_PROD_H_
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: mheule@google.com (Markus Heule)
+//
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_
+#define GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_
+
+#include <iosfwd>
+#include <vector>
+
+namespace testing {
+
+// A copyable object representing the result of a test part (i.e. an
+// assertion or an explicit FAIL(), ADD_FAILURE(), or SUCCEED()).
+//
+// Don't inherit from TestPartResult as its destructor is not virtual.
+class GTEST_API_ TestPartResult {
+ public:
+  // The possible outcomes of a test part (i.e. an assertion or an
+  // explicit SUCCEED(), FAIL(), or ADD_FAILURE()).
+  enum Type {
+    kSuccess,          // Succeeded.
+    kNonFatalFailure,  // Failed but the test can continue.
+    kFatalFailure      // Failed and the test should be terminated.
+  };
+
+  // C'tor.  TestPartResult does NOT have a default constructor.
+  // Always use this constructor (with parameters) to create a
+  // TestPartResult object.
+  TestPartResult(Type a_type,
+                 const char* a_file_name,
+                 int a_line_number,
+                 const char* a_message)
+      : type_(a_type),
+        file_name_(a_file_name == NULL ? "" : a_file_name),
+        line_number_(a_line_number),
+        summary_(ExtractSummary(a_message)),
+        message_(a_message) {
+  }
+
+  // Gets the outcome of the test part.
+  Type type() const { return type_; }
+
+  // Gets the name of the source file where the test part took place, or
+  // NULL if it's unknown.
+  const char* file_name() const {
+    return file_name_.empty() ? NULL : file_name_.c_str();
+  }
+
+  // Gets the line in the source file where the test part took place,
+  // or -1 if it's unknown.
+  int line_number() const { return line_number_; }
+
+  // Gets the summary of the failure message.
+  const char* summary() const { return summary_.c_str(); }
+
+  // Gets the message associated with the test part.
+  const char* message() const { return message_.c_str(); }
+
+  // Returns true iff the test part passed.
+  bool passed() const { return type_ == kSuccess; }
+
+  // Returns true iff the test part failed.
+  bool failed() const { return type_ != kSuccess; }
+
+  // Returns true iff the test part non-fatally failed.
+  bool nonfatally_failed() const { return type_ == kNonFatalFailure; }
+
+  // Returns true iff the test part fatally failed.
+  bool fatally_failed() const { return type_ == kFatalFailure; }
+
+ private:
+  Type type_;
+
+  // Gets the summary of the failure message by omitting the stack
+  // trace in it.
+  static std::string ExtractSummary(const char* message);
+
+  // The name of the source file where the test part took place, or
+  // "" if the source file is unknown.
+  std::string file_name_;
+  // The line in the source file where the test part took place, or -1
+  // if the line number is unknown.
+  int line_number_;
+  std::string summary_;  // The test failure summary.
+  std::string message_;  // The test failure message.
+};
+
+// Prints a TestPartResult object.
+std::ostream& operator<<(std::ostream& os, const TestPartResult& result);
+
+// An array of TestPartResult objects.
+//
+// Don't inherit from TestPartResultArray as its destructor is not
+// virtual.
+class GTEST_API_ TestPartResultArray {
+ public:
+  TestPartResultArray() {}
+
+  // Appends the given TestPartResult to the array.
+  void Append(const TestPartResult& result);
+
+  // Returns the TestPartResult at the given index (0-based).
+  const TestPartResult& GetTestPartResult(int index) const;
+
+  // Returns the number of TestPartResult objects in the array.
+  int size() const;
+
+ private:
+  std::vector<TestPartResult> array_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(TestPartResultArray);
+};
+
+// This interface knows how to report a test part result.
+class TestPartResultReporterInterface {
+ public:
+  virtual ~TestPartResultReporterInterface() {}
+
+  virtual void ReportTestPartResult(const TestPartResult& result) = 0;
+};
+
+namespace internal {
+
+// This helper class is used by {ASSERT|EXPECT}_NO_FATAL_FAILURE to check if a
+// statement generates new fatal failures. To do so it registers itself as the
+// current test part result reporter. Besides checking whether fatal failures
+// were reported, it simply delegates the reporting to the previous result
+// reporter. The original result reporter is restored in the destructor.
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+class GTEST_API_ HasNewFatalFailureHelper
+    : public TestPartResultReporterInterface {
+ public:
+  HasNewFatalFailureHelper();
+  virtual ~HasNewFatalFailureHelper();
+  virtual void ReportTestPartResult(const TestPartResult& result);
+  bool has_new_fatal_failure() const { return has_new_fatal_failure_; }
+ private:
+  bool has_new_fatal_failure_;
+  TestPartResultReporterInterface* original_reporter_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(HasNewFatalFailureHelper);
+};
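+
+// Illustrative sketch (not part of this header) of the public macros this
+// helper backs, ASSERT_NO_FATAL_FAILURE and EXPECT_NO_FATAL_FAILURE, which
+// verify that a statement generates no new fatal failure.  The function and
+// test names below are hypothetical.
+//
+//   void OpenAndCheckConfig() {
+//     ASSERT_TRUE(LoadConfig("config.txt"));  // A fatal failure aborts this
+//                                             // function only.
+//   }
+//
+//   TEST(ConfigTest, Loads) {
+//     EXPECT_NO_FATAL_FAILURE(OpenAndCheckConfig());
+//   }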
+
+}  // namespace internal
+
+}  // namespace testing
+
+#endif  // GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_
+#define GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_
+
+// This header implements typed tests and type-parameterized tests.
+
+// Typed (aka type-driven) tests repeat the same test for types in a
+// list.  You must know which types you want to test with when writing
+// typed tests. Here's how you do it:
+
+#if 0
+
+// First, define a fixture class template.  It should be parameterized
+// by a type.  Remember to derive it from testing::Test.
+template <typename T>
+class FooTest : public testing::Test {
+ public:
+  ...
+  typedef std::list<T> List;
+  static T shared_;
+  T value_;
+};
+
+// Next, associate a list of types with the test case, which will be
+// repeated for each type in the list.  The typedef is necessary for
+// the macro to parse correctly.
+typedef testing::Types<char, int, unsigned int> MyTypes;
+TYPED_TEST_CASE(FooTest, MyTypes);
+
+// If the type list contains only one type, you can write that type
+// directly without Types<...>:
+//   TYPED_TEST_CASE(FooTest, int);
+
+// Then, use TYPED_TEST() instead of TEST_F() to define as many typed
+// tests for this test case as you want.
+TYPED_TEST(FooTest, DoesBlah) {
+  // Inside a test, refer to TypeParam to get the type parameter.
+  // Since we are inside a derived class template, C++ requires us to
+  // visit the members of FooTest via 'this'.
+  TypeParam n = this->value_;
+
+  // To visit static members of the fixture, add the TestFixture::
+  // prefix.
+  n += TestFixture::shared_;
+
+  // To refer to typedefs in the fixture, add the "typename
+  // TestFixture::" prefix.
+  typename TestFixture::List values;
+  values.push_back(n);
+  ...
+}
+
+TYPED_TEST(FooTest, HasPropertyA) { ... }
+
+#endif  // 0
+
+// Type-parameterized tests are abstract test patterns parameterized
+// by a type.  Compared with typed tests, type-parameterized tests
+// allow you to define the test pattern without knowing what the type
+// parameters are.  The defined pattern can be instantiated with
+// different types any number of times, in any number of translation
+// units.
+//
+// If you are designing an interface or concept, you can define a
+// suite of type-parameterized tests to verify properties that any
+// valid implementation of the interface/concept should have.  Then,
+// each implementation can easily instantiate the test suite to verify
+// that it conforms to the requirements, without having to write
+// similar tests repeatedly.  Here's an example:
+
+#if 0
+
+// First, define a fixture class template.  It should be parameterized
+// by a type.  Remember to derive it from testing::Test.
+template <typename T>
+class FooTest : public testing::Test {
+  ...
+};
+
+// Next, declare that you will define a type-parameterized test case
+// (the _P suffix is for "parameterized" or "pattern", whichever you
+// prefer):
+TYPED_TEST_CASE_P(FooTest);
+
+// Then, use TYPED_TEST_P() to define as many type-parameterized tests
+// for this type-parameterized test case as you want.
+TYPED_TEST_P(FooTest, DoesBlah) {
+  // Inside a test, refer to TypeParam to get the type parameter.
+  TypeParam n = 0;
+  ...
+}
+
+TYPED_TEST_P(FooTest, HasPropertyA) { ... }
+
+// Now the tricky part: you need to register all test patterns before
+// you can instantiate them.  The first argument of the macro is the
+// test case name; the rest are the names of the tests in this test
+// case.
+REGISTER_TYPED_TEST_CASE_P(FooTest,
+                           DoesBlah, HasPropertyA);
+
+// Finally, you are free to instantiate the pattern with the types you
+// want.  If you put the above code in a header file, you can #include
+// it in multiple C++ source files and instantiate it multiple times.
+//
+// To distinguish different instances of the pattern, the first
+// argument to the INSTANTIATE_* macro is a prefix that will be added
+// to the actual test case name.  Remember to pick unique prefixes for
+// different instances.
+typedef testing::Types<char, int, unsigned int> MyTypes;
+INSTANTIATE_TYPED_TEST_CASE_P(My, FooTest, MyTypes);
+
+// If the type list contains only one type, you can write that type
+// directly without Types<...>:
+//   INSTANTIATE_TYPED_TEST_CASE_P(My, FooTest, int);
+
+#endif  // 0
+
+
+// Implements typed tests.
+
+#if GTEST_HAS_TYPED_TEST
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Expands to the name of the typedef for the type parameters of the
+// given test case.
+# define GTEST_TYPE_PARAMS_(TestCaseName) gtest_type_params_##TestCaseName##_
+
+// The 'Types' template argument below must have spaces around it
+// since some compilers may choke on '>>' when passing a template
+// instance (e.g. Types<int>)
+# define TYPED_TEST_CASE(CaseName, Types) \
+  typedef ::testing::internal::TypeList< Types >::type \
+      GTEST_TYPE_PARAMS_(CaseName)
+
+# define TYPED_TEST(CaseName, TestName) \
+  template <typename gtest_TypeParam_> \
+  class GTEST_TEST_CLASS_NAME_(CaseName, TestName) \
+      : public CaseName<gtest_TypeParam_> { \
+   private: \
+    typedef CaseName<gtest_TypeParam_> TestFixture; \
+    typedef gtest_TypeParam_ TypeParam; \
+    virtual void TestBody(); \
+  }; \
+  bool gtest_##CaseName##_##TestName##_registered_ GTEST_ATTRIBUTE_UNUSED_ = \
+      ::testing::internal::TypeParameterizedTest< \
+          CaseName, \
+          ::testing::internal::TemplateSel< \
+              GTEST_TEST_CLASS_NAME_(CaseName, TestName)>, \
+          GTEST_TYPE_PARAMS_(CaseName)>::Register(\
+              "", ::testing::internal::CodeLocation(__FILE__, __LINE__), \
+              #CaseName, #TestName, 0); \
+  template <typename gtest_TypeParam_> \
+  void GTEST_TEST_CLASS_NAME_(CaseName, TestName)<gtest_TypeParam_>::TestBody()
+
+#endif  // GTEST_HAS_TYPED_TEST
+
+// Implements type-parameterized tests.
+
+#if GTEST_HAS_TYPED_TEST_P
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Expands to the namespace name that the type-parameterized tests for
+// the given type-parameterized test case are defined in.  The exact
+// name of the namespace is subject to change without notice.
+# define GTEST_CASE_NAMESPACE_(TestCaseName) \
+  gtest_case_##TestCaseName##_
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Expands to the name of the variable used to remember the names of
+// the defined tests in the given test case.
+# define GTEST_TYPED_TEST_CASE_P_STATE_(TestCaseName) \
+  gtest_typed_test_case_p_state_##TestCaseName##_
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE DIRECTLY.
+//
+// Expands to the name of the variable used to remember the names of
+// the registered tests in the given test case.
+# define GTEST_REGISTERED_TEST_NAMES_(TestCaseName) \
+  gtest_registered_test_names_##TestCaseName##_
+
+// The variables defined in the type-parameterized test macros are
+// static as typically these macros are used in a .h file that can be
+// #included in multiple translation units linked together.
+# define TYPED_TEST_CASE_P(CaseName) \
+  static ::testing::internal::TypedTestCasePState \
+      GTEST_TYPED_TEST_CASE_P_STATE_(CaseName)
+
+# define TYPED_TEST_P(CaseName, TestName) \
+  namespace GTEST_CASE_NAMESPACE_(CaseName) { \
+  template <typename gtest_TypeParam_> \
+  class TestName : public CaseName<gtest_TypeParam_> { \
+   private: \
+    typedef CaseName<gtest_TypeParam_> TestFixture; \
+    typedef gtest_TypeParam_ TypeParam; \
+    virtual void TestBody(); \
+  }; \
+  static bool gtest_##TestName##_defined_ GTEST_ATTRIBUTE_UNUSED_ = \
+      GTEST_TYPED_TEST_CASE_P_STATE_(CaseName).AddTestName(\
+          __FILE__, __LINE__, #CaseName, #TestName); \
+  } \
+  template <typename gtest_TypeParam_> \
+  void GTEST_CASE_NAMESPACE_(CaseName)::TestName<gtest_TypeParam_>::TestBody()
+
+# define REGISTER_TYPED_TEST_CASE_P(CaseName, ...) \
+  namespace GTEST_CASE_NAMESPACE_(CaseName) { \
+  typedef ::testing::internal::Templates<__VA_ARGS__>::type gtest_AllTests_; \
+  } \
+  static const char* const GTEST_REGISTERED_TEST_NAMES_(CaseName) = \
+      GTEST_TYPED_TEST_CASE_P_STATE_(CaseName).VerifyRegisteredTestNames(\
+          __FILE__, __LINE__, #__VA_ARGS__)
+
+// The 'Types' template argument below must have spaces around it
+// since some compilers may choke on '>>' when passing a template
+// instance (e.g. Types<int>)
+# define INSTANTIATE_TYPED_TEST_CASE_P(Prefix, CaseName, Types) \
+  bool gtest_##Prefix##_##CaseName GTEST_ATTRIBUTE_UNUSED_ = \
+      ::testing::internal::TypeParameterizedTestCase<CaseName, \
+          GTEST_CASE_NAMESPACE_(CaseName)::gtest_AllTests_, \
+          ::testing::internal::TypeList< Types >::type>::Register(\
+              #Prefix, \
+              ::testing::internal::CodeLocation(__FILE__, __LINE__), \
+              &GTEST_TYPED_TEST_CASE_P_STATE_(CaseName), \
+              #CaseName, GTEST_REGISTERED_TEST_NAMES_(CaseName))
+
+#endif  // GTEST_HAS_TYPED_TEST_P
+
+#endif  // GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_
+
+// Depending on the platform, different string classes are available.
+// On Linux, in addition to ::std::string, Google also makes use of
+// class ::string, which has the same interface as ::std::string, but
+// has a different implementation.
+//
+// You can define GTEST_HAS_GLOBAL_STRING to 1 to indicate that
+// ::string is available AND is a distinct type from ::std::string, or
+// define it to 0 to indicate otherwise.
+//
+// If ::std::string and ::string are the same class on your platform
+// due to aliasing, you should define GTEST_HAS_GLOBAL_STRING to 0.
+//
+// If you do not define GTEST_HAS_GLOBAL_STRING, it is defined
+// heuristically.
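+//
+// A minimal sketch (an assumption about typical builds, not prescribed by
+// this header): when the heuristic guess is wrong, the macro is overridden
+// on the compiler command line, e.g.
+//
+//   g++ -DGTEST_HAS_GLOBAL_STRING=0 -c my_test.cc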
+
+namespace testing {
+
+// Declares the flags.
+
+// This flag temporarily enables disabled tests.
+GTEST_DECLARE_bool_(also_run_disabled_tests);
+
+// This flag makes Google Test break into the debugger on an assertion
+// failure.
+GTEST_DECLARE_bool_(break_on_failure);
+
+// This flag controls whether Google Test catches all test-thrown exceptions
+// and logs them as failures.
+GTEST_DECLARE_bool_(catch_exceptions);
+
+// This flag enables using colors in terminal output. Available values are
+// "yes" (enable colors), "no" (disable colors), and "auto" (the default,
+// which lets Google Test decide).
+GTEST_DECLARE_string_(color);
+
+// This flag sets the filter that selects, by name and using glob patterns,
+// the tests to run. If the filter is not given, all tests are executed.
+GTEST_DECLARE_string_(filter);
+
+// This flag causes Google Test to list the available tests. None of the
+// listed tests are actually run if the flag is provided.
+GTEST_DECLARE_bool_(list_tests);
+
+// This flag controls whether Google Test emits a detailed XML report to a file
+// in addition to its normal textual output.
+GTEST_DECLARE_string_(output);
+
+// This flag controls whether Google Test prints the elapsed time for each
+// test.
+GTEST_DECLARE_bool_(print_time);
+
+// This flag specifies the random number seed.
+GTEST_DECLARE_int32_(random_seed);
+
+// This flag sets how many times the tests are repeated. The default value
+// is 1. If the value is -1, the tests are repeated forever.
+GTEST_DECLARE_int32_(repeat);
+
+// This flag controls whether Google Test includes Google Test internal
+// stack frames in failure stack traces.
+GTEST_DECLARE_bool_(show_internal_stack_frames);
+
+// When this flag is specified, tests' order is randomized on every iteration.
+GTEST_DECLARE_bool_(shuffle);
+
+// This flag specifies the maximum number of stack frames to be
+// printed in a failure message.
+GTEST_DECLARE_int32_(stack_trace_depth);
+
+// When this flag is specified, a failed assertion will throw an
+// exception if exceptions are enabled, or exit the program with a
+// non-zero code otherwise.
+GTEST_DECLARE_bool_(throw_on_failure);
+
+// When this flag is set with a "host:port" string, on supported
+// platforms test results are streamed to the specified port on
+// the specified host machine.
+GTEST_DECLARE_string_(stream_result_to);
+
+// The upper limit for valid stack trace depths.
+const int kMaxStackTraceDepth = 100;
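+
+// Illustrative sketch (an assumption about the usual flag plumbing, shown
+// here only as usage context): each flag declared above is normally set via
+// the corresponding --gtest_* command line option or GTEST_* environment
+// variable, e.g.
+//
+//   ./my_test --gtest_filter='FooTest.*-FooTest.Slow' \
+//             --gtest_repeat=3 --gtest_shuffle \
+//             --gtest_output=xml:report.xml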
+
+namespace internal {
+
+class AssertHelper;
+class DefaultGlobalTestPartResultReporter;
+class ExecDeathTest;
+class NoExecDeathTest;
+class FinalSuccessChecker;
+class GTestFlagSaver;
+class StreamingListenerTest;
+class TestResultAccessor;
+class TestEventListenersAccessor;
+class TestEventRepeater;
+class UnitTestRecordPropertyTestHelper;
+class WindowsDeathTest;
+class UnitTestImpl* GetUnitTestImpl();
+void ReportFailureInUnknownLocation(TestPartResult::Type result_type,
+                                    const std::string& message);
+
+}  // namespace internal
+
+// The friend relationship of some of these classes is cyclic.
+// If we don't forward declare them, the compiler might confuse the classes
+// in friendship clauses with same-named classes in the enclosing scope.
+class Test;
+class TestCase;
+class TestInfo;
+class UnitTest;
+
+// A class for indicating whether an assertion was successful.  When
+// the assertion wasn't successful, the AssertionResult object
+// remembers a non-empty message that describes how it failed.
+//
+// To create an instance of this class, use one of the factory functions
+// (AssertionSuccess() and AssertionFailure()).
+//
+// This class is useful for two purposes:
+//   1. Defining predicate functions to be used with Boolean test assertions
+//      EXPECT_TRUE/EXPECT_FALSE and their ASSERT_ counterparts
+//   2. Defining predicate-format functions to be
+//      used with predicate assertions (ASSERT_PRED_FORMAT*, etc).
+//
+// For example, if you define IsEven predicate:
+//
+//   testing::AssertionResult IsEven(int n) {
+//     if ((n % 2) == 0)
+//       return testing::AssertionSuccess();
+//     else
+//       return testing::AssertionFailure() << n << " is odd";
+//   }
+//
+// Then the failed expectation EXPECT_TRUE(IsEven(Fib(5)))
+// will print the message
+//
+//   Value of: IsEven(Fib(5))
+//     Actual: false (5 is odd)
+//   Expected: true
+//
+// instead of a more opaque
+//
+//   Value of: IsEven(Fib(5))
+//     Actual: false
+//   Expected: true
+//
+// in case IsEven is a simple Boolean predicate.
+//
+// If you expect your predicate to be reused and want to support informative
+// messages in EXPECT_FALSE and ASSERT_FALSE (negative assertions show up
+// about half as often as positive ones in our tests), supply messages for
+// both success and failure cases:
+//
+//   testing::AssertionResult IsEven(int n) {
+//     if ((n % 2) == 0)
+//       return testing::AssertionSuccess() << n << " is even";
+//     else
+//       return testing::AssertionFailure() << n << " is odd";
+//   }
+//
+// Then a statement EXPECT_FALSE(IsEven(Fib(6))) will print
+//
+//   Value of: IsEven(Fib(6))
+//     Actual: true (8 is even)
+//   Expected: false
+//
+// NB: Predicates that support negative Boolean assertions have reduced
+// performance in positive ones, so be careful not to use them in tests
+// that have lots (tens of thousands) of positive Boolean assertions.
+//
+// To use this class with EXPECT_PRED_FORMAT assertions such as:
+//
+//   // Verifies that Foo() returns an even number.
+//   EXPECT_PRED_FORMAT1(IsEven, Foo());
+//
+// you need to define:
+//
+//   testing::AssertionResult IsEven(const char* expr, int n) {
+//     if ((n % 2) == 0)
+//       return testing::AssertionSuccess();
+//     else
+//       return testing::AssertionFailure()
+//         << "Expected: " << expr << " is even\n  Actual: it's " << n;
+//   }
+//
+// If Foo() returns 5, you will see the following message:
+//
+//   Expected: Foo() is even
+//     Actual: it's 5
+//
+class GTEST_API_ AssertionResult {
+ public:
+  // Copy constructor.
+  // Used in EXPECT_TRUE/FALSE(assertion_result).
+  AssertionResult(const AssertionResult& other);
+
+  GTEST_DISABLE_MSC_WARNINGS_PUSH_(4800 /* forcing value to bool */)
+
+  // Used in the EXPECT_TRUE/FALSE(bool_expression).
+  //
+  // T must be contextually convertible to bool.
+  //
+  // The second parameter prevents this overload from being considered if
+  // the argument is implicitly convertible to AssertionResult. In that case
+  // we want AssertionResult's copy constructor to be used.
+  template <typename T>
+  explicit AssertionResult(
+      const T& success,
+      typename internal::EnableIf<
+          !internal::ImplicitlyConvertible<T, AssertionResult>::value>::type*
+          /*enabler*/ = NULL)
+      : success_(success) {}
+
+  GTEST_DISABLE_MSC_WARNINGS_POP_()
+
+  // Assignment operator.
+  AssertionResult& operator=(AssertionResult other) {
+    swap(other);
+    return *this;
+  }
+
+  // Returns true iff the assertion succeeded.
+  operator bool() const { return success_; }  // NOLINT
+
+  // Returns the assertion's negation. Used with EXPECT/ASSERT_FALSE.
+  AssertionResult operator!() const;
+
+  // Returns the text streamed into this AssertionResult. Test assertions
+  // use it when they fail (i.e., the predicate's outcome doesn't match the
+  // assertion's expectation). When nothing has been streamed into the
+  // object, returns an empty string.
+  const char* message() const {
+    return message_.get() != NULL ?  message_->c_str() : "";
+  }
+  // TODO(vladl@google.com): Remove this after making sure no clients use it.
+  // Deprecated; please use message() instead.
+  const char* failure_message() const { return message(); }
+
+  // Streams a custom failure message into this object.
+  template <typename T> AssertionResult& operator<<(const T& value) {
+    AppendMessage(Message() << value);
+    return *this;
+  }
+
+  // Allows streaming basic output manipulators such as endl or flush into
+  // this object.
+  AssertionResult& operator<<(
+      ::std::ostream& (*basic_manipulator)(::std::ostream& stream)) {
+    AppendMessage(Message() << basic_manipulator);
+    return *this;
+  }
+
+ private:
+  // Appends the contents of message to message_.
+  void AppendMessage(const Message& a_message) {
+    if (message_.get() == NULL)
+      message_.reset(new ::std::string);
+    message_->append(a_message.GetString().c_str());
+  }
+
+  // Swap the contents of this AssertionResult with other.
+  void swap(AssertionResult& other);
+
+  // Stores result of the assertion predicate.
+  bool success_;
+  // Stores the message describing the condition in case the expectation
+  // construct is not satisfied with the predicate's outcome.
+  // Referenced via a pointer to avoid taking too much stack frame space
+  // with test assertions.
+  internal::scoped_ptr< ::std::string> message_;
+};
+
+// Makes a successful assertion result.
+GTEST_API_ AssertionResult AssertionSuccess();
+
+// Makes a failed assertion result.
+GTEST_API_ AssertionResult AssertionFailure();
+
+// Makes a failed assertion result with the given failure message.
+// Deprecated; use AssertionFailure() << msg.
+GTEST_API_ AssertionResult AssertionFailure(const Message& msg);
+
+// The abstract class that all tests inherit from.
+//
+// In Google Test, a unit test program contains one or many TestCases, and
+// each TestCase contains one or many Tests.
+//
+// When you define a test using the TEST macro, you don't need to
+// explicitly derive from Test - the TEST macro automatically does
+// this for you.
+//
+// The only time you derive from Test is when defining a test fixture
+// to be used in a TEST_F.  For example:
+//
+//   class FooTest : public testing::Test {
+//    protected:
+//     void SetUp() override { ... }
+//     void TearDown() override { ... }
+//     ...
+//   };
+//
+//   TEST_F(FooTest, Bar) { ... }
+//   TEST_F(FooTest, Baz) { ... }
+//
+// Test is not copyable.
+class GTEST_API_ Test {
+ public:
+  friend class TestInfo;
+
+  // Defines types for pointers to functions that set up and tear down
+  // a test case.
+  typedef internal::SetUpTestCaseFunc SetUpTestCaseFunc;
+  typedef internal::TearDownTestCaseFunc TearDownTestCaseFunc;
+
+  // The d'tor is virtual as we intend to inherit from Test.
+  virtual ~Test();
+
+  // Sets up the stuff shared by all tests in this test case.
+  //
+  // Google Test will call Foo::SetUpTestCase() before running the first
+  // test in test case Foo.  Hence a sub-class can define its own
+  // SetUpTestCase() method to shadow the one defined in the super
+  // class.
+  static void SetUpTestCase() {}
+
+  // Tears down the stuff shared by all tests in this test case.
+  //
+  // Google Test will call Foo::TearDownTestCase() after running the last
+  // test in test case Foo.  Hence a sub-class can define its own
+  // TearDownTestCase() method to shadow the one defined in the super
+  // class.
+  static void TearDownTestCase() {}
+
+  // Returns true iff the current test has a fatal failure.
+  static bool HasFatalFailure();
+
+  // Returns true iff the current test has a non-fatal failure.
+  static bool HasNonfatalFailure();
+
+  // Returns true iff the current test has a (either fatal or
+  // non-fatal) failure.
+  static bool HasFailure() { return HasFatalFailure() || HasNonfatalFailure(); }
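+
+  // Illustrative usage sketch (the helper name below is hypothetical): since
+  // ASSERT_* failures only abort the function in which they occur, a test
+  // can use HasFatalFailure() to stop after a failing subroutine:
+  //
+  //   TEST_F(FooTest, Bar) {
+  //     PopulateSubject();           // Uses ASSERT_* internally.
+  //     if (HasFatalFailure()) return;
+  //     // ... only reached if the subroutine did not fail fatally ...
+  //   }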
+
+  // Logs a property for the current test, test case, or for the entire
+  // invocation of the test program when used outside of the context of a
+  // test case.  Only the last value for a given key is remembered.  These
+  // are public static so they can be called from utility functions that are
+  // not members of the test fixture.  Calls to RecordProperty made during
+  // lifespan of the test (from the moment its constructor starts to the
+  // moment its destructor finishes) will be output in XML as attributes of
+  // the <testcase> element.  Properties recorded from fixture's
+  // SetUpTestCase or TearDownTestCase are logged as attributes of the
+  // corresponding <testsuite> element.  Calls to RecordProperty made in the
+  // global context (before or after invocation of RUN_ALL_TESTS and from
+  // SetUp/TearDown method of Environment objects registered with Google
+  // Test) will be output as attributes of the <testsuites> element.
+  static void RecordProperty(const std::string& key, const std::string& value);
+  static void RecordProperty(const std::string& key, int value);
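+  //
+  // Illustrative usage sketch (test and key names are hypothetical):
+  //
+  //   TEST_F(FooTest, RecordsBuildInfo) {
+  //     RecordProperty("build_label", "nightly");
+  //     RecordProperty("shard_index", 3);
+  //   }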
+
+ protected:
+  // Creates a Test object.
+  Test();
+
+  // Sets up the test fixture.
+  virtual void SetUp();
+
+  // Tears down the test fixture.
+  virtual void TearDown();
+
+ private:
+  // Returns true iff the current test has the same fixture class as
+  // the first test in the current test case.
+  static bool HasSameFixtureClass();
+
+  // Runs the test after the test fixture has been set up.
+  //
+  // A sub-class must implement this to define the test logic.
+  //
+  // DO NOT OVERRIDE THIS FUNCTION DIRECTLY IN A USER PROGRAM.
+  // Instead, use the TEST or TEST_F macro.
+  virtual void TestBody() = 0;
+
+  // Sets up, executes, and tears down the test.
+  void Run();
+
+  // Deletes self.  We deliberately pick an unusual name for this
+  // internal method to avoid clashing with names used in user TESTs.
+  void DeleteSelf_() { delete this; }
+
+  const internal::scoped_ptr< GTEST_FLAG_SAVER_ > gtest_flag_saver_;
+
+  // Often a user misspells SetUp() as Setup() and spends a long time
+  // wondering why it is never called by Google Test.  The declaration of
+  // the following method is solely for catching such an error at
+  // compile time:
+  //
+  //   - The return type is deliberately chosen not to be void, so it
+  //   will conflict if void Setup() is declared in the user's
+  //   test fixture.
+  //
+  //   - This method is private, so it will be another compiler error
+  //   if the method is called from the user's test fixture.
+  //
+  // DO NOT OVERRIDE THIS FUNCTION.
+  //
+  // If you see an error about overriding the following function or
+  // about it being private, you have mis-spelled SetUp() as Setup().
+  struct Setup_should_be_spelled_SetUp {};
+  virtual Setup_should_be_spelled_SetUp* Setup() { return NULL; }
+
+  // We disallow copying Tests.
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(Test);
+};
+
+typedef internal::TimeInMillis TimeInMillis;
+
+// A copyable object representing a user specified test property which can be
+// output as a key/value string pair.
+//
+// Don't inherit from TestProperty as its destructor is not virtual.
+class TestProperty {
+ public:
+  // C'tor.  TestProperty does NOT have a default constructor.
+  // Always use this constructor (with parameters) to create a
+  // TestProperty object.
+  TestProperty(const std::string& a_key, const std::string& a_value) :
+    key_(a_key), value_(a_value) {
+  }
+
+  // Gets the user supplied key.
+  const char* key() const {
+    return key_.c_str();
+  }
+
+  // Gets the user supplied value.
+  const char* value() const {
+    return value_.c_str();
+  }
+
+  // Sets a new value, overriding the one supplied in the constructor.
+  void SetValue(const std::string& new_value) {
+    value_ = new_value;
+  }
+
+ private:
+  // The key supplied by the user.
+  std::string key_;
+  // The value supplied by the user.
+  std::string value_;
+};
+
+// The result of a single Test.  This includes a list of
+// TestPartResults, a list of TestProperties, a count of how many
+// death tests there are in the Test, and how much time it took to run
+// the Test.
+//
+// TestResult is not copyable.
+class GTEST_API_ TestResult {
+ public:
+  // Creates an empty TestResult.
+  TestResult();
+
+  // D'tor.  Do not inherit from TestResult.
+  ~TestResult();
+
+  // Gets the number of all test parts.  This is the sum of the number
+  // of successful test parts and the number of failed test parts.
+  int total_part_count() const;
+
+  // Returns the number of the test properties.
+  int test_property_count() const;
+
+  // Returns true iff the test passed (i.e. no test part failed).
+  bool Passed() const { return !Failed(); }
+
+  // Returns true iff the test failed.
+  bool Failed() const;
+
+  // Returns true iff the test fatally failed.
+  bool HasFatalFailure() const;
+
+  // Returns true iff the test has a non-fatal failure.
+  bool HasNonfatalFailure() const;
+
+  // Returns the elapsed time, in milliseconds.
+  TimeInMillis elapsed_time() const { return elapsed_time_; }
+
+  // Returns the i-th test part result among all the results. i can range
+  // from 0 to total_part_count() - 1. If i is not in that range, aborts
+  // the program.
+  const TestPartResult& GetTestPartResult(int i) const;
+
+  // Returns the i-th test property. i can range from 0 to
+  // test_property_count() - 1. If i is not in that range, aborts the
+  // program.
+  const TestProperty& GetTestProperty(int i) const;
+
+ private:
+  friend class TestInfo;
+  friend class TestCase;
+  friend class UnitTest;
+  friend class internal::DefaultGlobalTestPartResultReporter;
+  friend class internal::ExecDeathTest;
+  friend class internal::TestResultAccessor;
+  friend class internal::UnitTestImpl;
+  friend class internal::WindowsDeathTest;
+
+  // Gets the vector of TestPartResults.
+  const std::vector<TestPartResult>& test_part_results() const {
+    return test_part_results_;
+  }
+
+  // Gets the vector of TestProperties.
+  const std::vector<TestProperty>& test_properties() const {
+    return test_properties_;
+  }
+
+  // Sets the elapsed time.
+  void set_elapsed_time(TimeInMillis elapsed) { elapsed_time_ = elapsed; }
+
+  // Adds a test property to the list. The property is validated and may add
+  // a non-fatal failure if invalid (e.g., if it conflicts with reserved
+  // key names). If a property is already recorded for the same key, the
+  // value will be updated, rather than storing multiple values for the same
+  // key.  xml_element specifies the element for which the property is being
+  // recorded and is used for validation.
+  void RecordProperty(const std::string& xml_element,
+                      const TestProperty& test_property);
+
+  // Adds a failure if the key is a reserved attribute of Google Test
+  // testcase tags.  Returns true if the property is valid.
+  // TODO(russr): Validate attribute names are legal and human readable.
+  static bool ValidateTestProperty(const std::string& xml_element,
+                                   const TestProperty& test_property);
+
+  // Adds a test part result to the list.
+  void AddTestPartResult(const TestPartResult& test_part_result);
+
+  // Returns the death test count.
+  int death_test_count() const { return death_test_count_; }
+
+  // Increments the death test count, returning the new count.
+  int increment_death_test_count() { return ++death_test_count_; }
+
+  // Clears the test part results.
+  void ClearTestPartResults();
+
+  // Clears the object.
+  void Clear();
+
+  // Protects mutable state of the property vector and of owned
+  // properties, whose values may be updated.
+  internal::Mutex test_properites_mutex_;
+
+  // The vector of TestPartResults
+  std::vector<TestPartResult> test_part_results_;
+  // The vector of TestProperties
+  std::vector<TestProperty> test_properties_;
+  // Running count of death tests.
+  int death_test_count_;
+  // The elapsed time, in milliseconds.
+  TimeInMillis elapsed_time_;
+
+  // We disallow copying TestResult.
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(TestResult);
+};  // class TestResult
+
+// A TestInfo object stores the following information about a test:
+//
+//   Test case name
+//   Test name
+//   Whether the test should be run
+//   A function pointer that creates the test object when invoked
+//   Test result
+//
+// The constructor of TestInfo registers itself with the UnitTest
+// singleton such that the RUN_ALL_TESTS() macro knows which tests to
+// run.
+class GTEST_API_ TestInfo {
+ public:
+  // Destructs a TestInfo object.  This function is not virtual, so
+  // don't inherit from TestInfo.
+  ~TestInfo();
+
+  // Returns the test case name.
+  const char* test_case_name() const { return test_case_name_.c_str(); }
+
+  // Returns the test name.
+  const char* name() const { return name_.c_str(); }
+
+  // Returns the name of the parameter type, or NULL if this is not a typed
+  // or a type-parameterized test.
+  const char* type_param() const {
+    if (type_param_.get() != NULL)
+      return type_param_->c_str();
+    return NULL;
+  }
+
+  // Returns the text representation of the value parameter, or NULL if this
+  // is not a value-parameterized test.
+  const char* value_param() const {
+    if (value_param_.get() != NULL)
+      return value_param_->c_str();
+    return NULL;
+  }
+
+  // Returns the file name where this test is defined.
+  const char* file() const { return location_.file.c_str(); }
+
+  // Returns the line where this test is defined.
+  int line() const { return location_.line; }
+
+  // Returns true if this test should run, that is if the test is not
+  // disabled (or it is disabled but the also_run_disabled_tests flag has
+  // been specified) and its full name matches the user-specified filter.
+  //
+  // Google Test allows the user to filter the tests by their full names.
+  // The full name of a test Bar in test case Foo is defined as
+  // "Foo.Bar".  Only the tests that match the filter will run.
+  //
+  // A filter is a colon-separated list of glob (not regex) patterns,
+  // optionally followed by a '-' and a colon-separated list of
+  // negative patterns (tests to exclude).  A test is run if it
+  // matches one of the positive patterns and does not match any of
+  // the negative patterns.
+  //
+  // For example, *A*:Foo.* is a filter that matches any string that
+  // contains the character 'A' or starts with "Foo.".
+  bool should_run() const { return should_run_; }
+
+  // Returns true iff this test will appear in the XML report.
+  bool is_reportable() const {
+    // For now, the XML report includes all tests matching the filter.
+    // In the future, we may trim tests that are excluded because of
+    // sharding.
+    return matches_filter_;
+  }
+
+  // Returns the result of the test.
+  const TestResult* result() const { return &result_; }
+
+ private:
+#if GTEST_HAS_DEATH_TEST
+  friend class internal::DefaultDeathTestFactory;
+#endif  // GTEST_HAS_DEATH_TEST
+  friend class Test;
+  friend class TestCase;
+  friend class internal::UnitTestImpl;
+  friend class internal::StreamingListenerTest;
+  friend TestInfo* internal::MakeAndRegisterTestInfo(
+      const char* test_case_name,
+      const char* name,
+      const char* type_param,
+      const char* value_param,
+      internal::CodeLocation code_location,
+      internal::TypeId fixture_class_id,
+      Test::SetUpTestCaseFunc set_up_tc,
+      Test::TearDownTestCaseFunc tear_down_tc,
+      internal::TestFactoryBase* factory);
+
+  // Constructs a TestInfo object. The newly constructed instance assumes
+  // ownership of the factory object.
+  TestInfo(const std::string& test_case_name,
+           const std::string& name,
+           const char* a_type_param,   // NULL if not a type-parameterized test
+           const char* a_value_param,  // NULL if not a value-parameterized test
+           internal::CodeLocation a_code_location,
+           internal::TypeId fixture_class_id,
+           internal::TestFactoryBase* factory);
+
+  // Increments the number of death tests encountered in this test so
+  // far.
+  int increment_death_test_count() {
+    return result_.increment_death_test_count();
+  }
+
+  // Creates the test object, runs it, records its result, and then
+  // deletes it.
+  void Run();
+
+  static void ClearTestResult(TestInfo* test_info) {
+    test_info->result_.Clear();
+  }
+
+  // These fields are immutable properties of the test.
+  const std::string test_case_name_;     // Test case name
+  const std::string name_;               // Test name
+  // Name of the parameter type, or NULL if this is not a typed or a
+  // type-parameterized test.
+  const internal::scoped_ptr<const ::std::string> type_param_;
+  // Text representation of the value parameter, or NULL if this is not a
+  // value-parameterized test.
+  const internal::scoped_ptr<const ::std::string> value_param_;
+  internal::CodeLocation location_;
+  const internal::TypeId fixture_class_id_;   // ID of the test fixture class
+  bool should_run_;                 // True iff this test should run
+  bool is_disabled_;                // True iff this test is disabled
+  bool matches_filter_;             // True if this test matches the
+                                    // user-specified filter.
+  internal::TestFactoryBase* const factory_;  // The factory that creates
+                                              // the test object
+
+  // This field is mutable and needs to be reset before running the
+  // test for the second time.
+  TestResult result_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(TestInfo);
+};
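+
+// A minimal usage sketch: user code typically reaches the TestInfo of the
+// currently running test through the UnitTest singleton, e.g. to tag log
+// output with the test's name:
+//
+//   const ::testing::TestInfo* const test_info =
+//       ::testing::UnitTest::GetInstance()->current_test_info();
+//   printf("Running %s.%s\n",
+//          test_info->test_case_name(), test_info->name());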
+
+// A test case, which consists of a vector of TestInfos.
+//
+// TestCase is not copyable.
+class GTEST_API_ TestCase {
+ public:
+  // Creates a TestCase with the given name.
+  //
+  // TestCase does NOT have a default constructor.  Always use this
+  // constructor to create a TestCase object.
+  //
+  // Arguments:
+  //
+  //   name:         name of the test case
+  //   a_type_param: the name of the test's type parameter, or NULL if
+  //                 this is not a type-parameterized test.
+  //   set_up_tc:    pointer to the function that sets up the test case
+  //   tear_down_tc: pointer to the function that tears down the test case
+  TestCase(const char* name, const char* a_type_param,
+           Test::SetUpTestCaseFunc set_up_tc,
+           Test::TearDownTestCaseFunc tear_down_tc);
+
+  // Destructor of TestCase.
+  virtual ~TestCase();
+
+  // Gets the name of the TestCase.
+  const char* name() const { return name_.c_str(); }
+
+  // Returns the name of the parameter type, or NULL if this is not a
+  // type-parameterized test case.
+  const char* type_param() const {
+    if (type_param_.get() != NULL)
+      return type_param_->c_str();
+    return NULL;
+  }
+
+  // Returns true if any test in this test case should run.
+  bool should_run() const { return should_run_; }
+
+  // Gets the number of successful tests in this test case.
+  int successful_test_count() const;
+
+  // Gets the number of failed tests in this test case.
+  int failed_test_count() const;
+
+  // Gets the number of disabled tests that will be reported in the XML report.
+  int reportable_disabled_test_count() const;
+
+  // Gets the number of disabled tests in this test case.
+  int disabled_test_count() const;
+
+  // Gets the number of tests to be printed in the XML report.
+  int reportable_test_count() const;
+
+  // Get the number of tests in this test case that should run.
+  int test_to_run_count() const;
+
+  // Gets the number of all tests in this test case.
+  int total_test_count() const;
+
+  // Returns true iff the test case passed.
+  bool Passed() const { return !Failed(); }
+
+  // Returns true iff the test case failed.
+  bool Failed() const { return failed_test_count() > 0; }
+
+  // Returns the elapsed time, in milliseconds.
+  TimeInMillis elapsed_time() const { return elapsed_time_; }
+
+  // Returns the i-th test among all the tests. i can range from 0 to
+  // total_test_count() - 1. If i is not in that range, returns NULL.
+  const TestInfo* GetTestInfo(int i) const;
+
+  // Returns the TestResult that holds test properties recorded during
+  // execution of SetUpTestCase and TearDownTestCase.
+  const TestResult& ad_hoc_test_result() const { return ad_hoc_test_result_; }
+
+ private:
+  friend class Test;
+  friend class internal::UnitTestImpl;
+
+  // Gets the (mutable) vector of TestInfos in this TestCase.
+  std::vector<TestInfo*>& test_info_list() { return test_info_list_; }
+
+  // Gets the (immutable) vector of TestInfos in this TestCase.
+  const std::vector<TestInfo*>& test_info_list() const {
+    return test_info_list_;
+  }
+
+  // Returns the i-th test among all the tests. i can range from 0 to
+  // total_test_count() - 1. If i is not in that range, returns NULL.
+  TestInfo* GetMutableTestInfo(int i);
+
+  // Sets the should_run member.
+  void set_should_run(bool should) { should_run_ = should; }
+
+  // Adds a TestInfo to this test case.  Will delete the TestInfo upon
+  // destruction of the TestCase object.
+  void AddTestInfo(TestInfo * test_info);
+
+  // Clears the results of all tests in this test case.
+  void ClearResult();
+
+  // Clears the results of all tests in the given test case.
+  static void ClearTestCaseResult(TestCase* test_case) {
+    test_case->ClearResult();
+  }
+
+  // Runs every test in this TestCase.
+  void Run();
+
+  // Runs SetUpTestCase() for this TestCase.  This wrapper is needed
+  // for catching exceptions thrown from SetUpTestCase().
+  void RunSetUpTestCase() { (*set_up_tc_)(); }
+
+  // Runs TearDownTestCase() for this TestCase.  This wrapper is
+  // needed for catching exceptions thrown from TearDownTestCase().
+  void RunTearDownTestCase() { (*tear_down_tc_)(); }
+
+  // Returns true iff test passed.
+  static bool TestPassed(const TestInfo* test_info) {
+    return test_info->should_run() && test_info->result()->Passed();
+  }
+
+  // Returns true iff test failed.
+  static bool TestFailed(const TestInfo* test_info) {
+    return test_info->should_run() && test_info->result()->Failed();
+  }
+
+  // Returns true iff the test is disabled and will be reported in the XML
+  // report.
+  static bool TestReportableDisabled(const TestInfo* test_info) {
+    return test_info->is_reportable() && test_info->is_disabled_;
+  }
+
+  // Returns true iff test is disabled.
+  static bool TestDisabled(const TestInfo* test_info) {
+    return test_info->is_disabled_;
+  }
+
+  // Returns true iff this test will appear in the XML report.
+  static bool TestReportable(const TestInfo* test_info) {
+    return test_info->is_reportable();
+  }
+
+  // Returns true if the given test should run.
+  static bool ShouldRunTest(const TestInfo* test_info) {
+    return test_info->should_run();
+  }
+
+  // Shuffles the tests in this test case.
+  void ShuffleTests(internal::Random* random);
+
+  // Restores the test order to before the first shuffle.
+  void UnshuffleTests();
+
+  // Name of the test case.
+  std::string name_;
+  // Name of the parameter type, or NULL if this is not a typed or a
+  // type-parameterized test.
+  const internal::scoped_ptr<const ::std::string> type_param_;
+  // The vector of TestInfos in their original order.  It owns the
+  // elements in the vector.
+  std::vector<TestInfo*> test_info_list_;
+  // Provides a level of indirection for the test list to allow easy
+  // shuffling and restoring the test order.  The i-th element in this
+  // vector is the index of the i-th test in the shuffled test list.
+  std::vector<int> test_indices_;
+  // Pointer to the function that sets up the test case.
+  Test::SetUpTestCaseFunc set_up_tc_;
+  // Pointer to the function that tears down the test case.
+  Test::TearDownTestCaseFunc tear_down_tc_;
+  // True iff any test in this test case should run.
+  bool should_run_;
+  // Elapsed time, in milliseconds.
+  TimeInMillis elapsed_time_;
+  // Holds test properties recorded during execution of SetUpTestCase and
+  // TearDownTestCase.
+  TestResult ad_hoc_test_result_;
+
+  // We disallow copying TestCases.
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(TestCase);
+};
+
+// An Environment object is capable of setting up and tearing down an
+// environment.  You should subclass this to define your own
+// environment(s).
+//
+// An Environment object does the set-up and tear-down in virtual
+// methods SetUp() and TearDown() instead of the constructor and the
+// destructor, as:
+//
+//   1. You cannot safely throw from a destructor.  This is a problem
+//      as in some cases Google Test is used where exceptions are enabled, and
+//      we may want to implement ASSERT_* using exceptions where they are
+//      available.
+//   2. You cannot use ASSERT_* directly in a constructor or
+//      destructor.
+class Environment {
+ public:
+  // The d'tor is virtual as we need to subclass Environment.
+  virtual ~Environment() {}
+
+  // Override this to define how to set up the environment.
+  virtual void SetUp() {}
+
+  // Override this to define how to tear down the environment.
+  virtual void TearDown() {}
+ private:
+  // If you see an error about overriding the following function or
+  // about it being private, you have mis-spelled SetUp() as Setup().
+  struct Setup_should_be_spelled_SetUp {};
+  virtual Setup_should_be_spelled_SetUp* Setup() { return NULL; }
+};
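+
+// A minimal sketch of a user-defined environment (FooEnvironment is a
+// made-up name).  It would be registered with AddGlobalTestEnvironment(),
+// declared further below:
+//
+//   class FooEnvironment : public ::testing::Environment {
+//    public:
+//     virtual void SetUp() { /* acquire a shared resource */ }
+//     virtual void TearDown() { /* release the shared resource */ }
+//   };
+//
+//   // In main(), before RUN_ALL_TESTS():
+//   ::testing::AddGlobalTestEnvironment(new FooEnvironment);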
+
+// The interface for tracing execution of tests. The methods are organized in
+// the order the corresponding events are fired.
+class TestEventListener {
+ public:
+  virtual ~TestEventListener() {}
+
+  // Fired before any test activity starts.
+  virtual void OnTestProgramStart(const UnitTest& unit_test) = 0;
+
+  // Fired before each iteration of tests starts.  There may be more than
+  // one iteration if GTEST_FLAG(repeat) is set. iteration is the iteration
+  // index, starting from 0.
+  virtual void OnTestIterationStart(const UnitTest& unit_test,
+                                    int iteration) = 0;
+
+  // Fired before environment set-up for each iteration of tests starts.
+  virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test) = 0;
+
+  // Fired after environment set-up for each iteration of tests ends.
+  virtual void OnEnvironmentsSetUpEnd(const UnitTest& unit_test) = 0;
+
+  // Fired before the test case starts.
+  virtual void OnTestCaseStart(const TestCase& test_case) = 0;
+
+  // Fired before the test starts.
+  virtual void OnTestStart(const TestInfo& test_info) = 0;
+
+  // Fired after a failed assertion or a SUCCEED() invocation.
+  virtual void OnTestPartResult(const TestPartResult& test_part_result) = 0;
+
+  // Fired after the test ends.
+  virtual void OnTestEnd(const TestInfo& test_info) = 0;
+
+  // Fired after the test case ends.
+  virtual void OnTestCaseEnd(const TestCase& test_case) = 0;
+
+  // Fired before environment tear-down for each iteration of tests starts.
+  virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test) = 0;
+
+  // Fired after environment tear-down for each iteration of tests ends.
+  virtual void OnEnvironmentsTearDownEnd(const UnitTest& unit_test) = 0;
+
+  // Fired after each iteration of tests finishes.
+  virtual void OnTestIterationEnd(const UnitTest& unit_test,
+                                  int iteration) = 0;
+
+  // Fired after all test activities have ended.
+  virtual void OnTestProgramEnd(const UnitTest& unit_test) = 0;
+};
+
+// The convenience class for users who need to override just one or two
+// methods and are not concerned that a possible change to the signature of
+// the methods they override will not be caught during the build.  For
+// comments about each method please see the definition of TestEventListener
+// above.
+class EmptyTestEventListener : public TestEventListener {
+ public:
+  virtual void OnTestProgramStart(const UnitTest& /*unit_test*/) {}
+  virtual void OnTestIterationStart(const UnitTest& /*unit_test*/,
+                                    int /*iteration*/) {}
+  virtual void OnEnvironmentsSetUpStart(const UnitTest& /*unit_test*/) {}
+  virtual void OnEnvironmentsSetUpEnd(const UnitTest& /*unit_test*/) {}
+  virtual void OnTestCaseStart(const TestCase& /*test_case*/) {}
+  virtual void OnTestStart(const TestInfo& /*test_info*/) {}
+  virtual void OnTestPartResult(const TestPartResult& /*test_part_result*/) {}
+  virtual void OnTestEnd(const TestInfo& /*test_info*/) {}
+  virtual void OnTestCaseEnd(const TestCase& /*test_case*/) {}
+  virtual void OnEnvironmentsTearDownStart(const UnitTest& /*unit_test*/) {}
+  virtual void OnEnvironmentsTearDownEnd(const UnitTest& /*unit_test*/) {}
+  virtual void OnTestIterationEnd(const UnitTest& /*unit_test*/,
+                                  int /*iteration*/) {}
+  virtual void OnTestProgramEnd(const UnitTest& /*unit_test*/) {}
+};
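+
+// A minimal sketch of a custom listener built on EmptyTestEventListener
+// (MinimalistPrinter is a made-up name); only the events of interest need
+// to be overridden:
+//
+//   class MinimalistPrinter : public ::testing::EmptyTestEventListener {
+//     // Called before a test starts.
+//     virtual void OnTestStart(const ::testing::TestInfo& test_info) {
+//       printf("*** Test %s.%s starting.\n",
+//              test_info.test_case_name(), test_info.name());
+//     }
+//     // Called after a test ends.
+//     virtual void OnTestEnd(const ::testing::TestInfo& test_info) {
+//       printf("*** Test %s.%s ending.\n",
+//              test_info.test_case_name(), test_info.name());
+//     }
+//   };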
+
+// TestEventListeners lets users add listeners to track events in Google Test.
+class GTEST_API_ TestEventListeners {
+ public:
+  TestEventListeners();
+  ~TestEventListeners();
+
+  // Appends an event listener to the end of the list. Google Test assumes
+  // the ownership of the listener (i.e. it will delete the listener when
+  // the test program finishes).
+  void Append(TestEventListener* listener);
+
+  // Removes the given event listener from the list and returns it.  It then
+  // becomes the caller's responsibility to delete the listener. Returns
+  // NULL if the listener is not found in the list.
+  TestEventListener* Release(TestEventListener* listener);
+
+  // Returns the standard listener responsible for the default console
+  // output.  Can be removed from the listeners list to shut down default
+  // console output.  Note that removing this object from the listener list
+  // with Release transfers its ownership to the caller and makes this
+  // function return NULL the next time.
+  TestEventListener* default_result_printer() const {
+    return default_result_printer_;
+  }
+
+  // Returns the standard listener responsible for the default XML output
+  // controlled by the --gtest_output=xml flag.  Can be removed from the
+  // listeners list by users who want to shut down the default XML output
+  // controlled by this flag and substitute it with a custom one.  Note that
+  // removing this object from the listener list with Release transfers its
+  // ownership to the caller and makes this function return NULL the next
+  // time.
+  TestEventListener* default_xml_generator() const {
+    return default_xml_generator_;
+  }
+
+ private:
+  friend class TestCase;
+  friend class TestInfo;
+  friend class internal::DefaultGlobalTestPartResultReporter;
+  friend class internal::NoExecDeathTest;
+  friend class internal::TestEventListenersAccessor;
+  friend class internal::UnitTestImpl;
+
+  // Returns repeater that broadcasts the TestEventListener events to all
+  // subscribers.
+  TestEventListener* repeater();
+
+  // Sets the default_result_printer attribute to the provided listener.
+  // The listener is also added to the listener list and previous
+  // default_result_printer is removed from it and deleted. The listener can
+  // also be NULL in which case it will not be added to the list. Does
+  // nothing if the previous and the current listener objects are the same.
+  void SetDefaultResultPrinter(TestEventListener* listener);
+
+  // Sets the default_xml_generator attribute to the provided listener.  The
+  // listener is also added to the listener list and previous
+  // default_xml_generator is removed from it and deleted. The listener can
+  // also be NULL in which case it will not be added to the list. Does
+  // nothing if the previous and the current listener objects are the same.
+  void SetDefaultXmlGenerator(TestEventListener* listener);
+
+  // Controls whether events will be forwarded by the repeater to the
+  // listeners in the list.
+  bool EventForwardingEnabled() const;
+  void SuppressEventForwarding();
+
+  // The actual list of listeners.
+  internal::TestEventRepeater* repeater_;
+  // Listener responsible for the standard result output.
+  TestEventListener* default_result_printer_;
+  // Listener responsible for the creation of the XML output file.
+  TestEventListener* default_xml_generator_;
+
+  // We disallow copying TestEventListeners.
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(TestEventListeners);
+};
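+
+// A minimal sketch of how the listener list is typically used in main(),
+// installing the MinimalistPrinter sketched above and, optionally, removing
+// the default console printer:
+//
+//   ::testing::TestEventListeners& listeners =
+//       ::testing::UnitTest::GetInstance()->listeners();
+//   listeners.Append(new MinimalistPrinter);  // Google Test takes ownership.
+//   // Optional: shut down the default console output.
+//   delete listeners.Release(listeners.default_result_printer());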
+
+// A UnitTest consists of a vector of TestCases.
+//
+// This is a singleton class.  The only instance of UnitTest is
+// created when UnitTest::GetInstance() is first called.  This
+// instance is never deleted.
+//
+// UnitTest is not copyable.
+//
+// This class is thread-safe as long as the methods are called
+// according to their specification.
+class GTEST_API_ UnitTest {
+ public:
+  // Gets the singleton UnitTest object.  The first time this method
+  // is called, a UnitTest object is constructed and returned.
+  // Consecutive calls will return the same object.
+  static UnitTest* GetInstance();
+
+  // Runs all tests in this UnitTest object and prints the result.
+  // Returns 0 if successful, or 1 otherwise.
+  //
+  // This method can only be called from the main thread.
+  //
+  // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+  int Run() GTEST_MUST_USE_RESULT_;
+
+  // Returns the working directory when the first TEST() or TEST_F()
+  // was executed.  The UnitTest object owns the string.
+  const char* original_working_dir() const;
+
+  // Returns the TestCase object for the test that's currently running,
+  // or NULL if no test is running.
+  const TestCase* current_test_case() const
+      GTEST_LOCK_EXCLUDED_(mutex_);
+
+  // Returns the TestInfo object for the test that's currently running,
+  // or NULL if no test is running.
+  const TestInfo* current_test_info() const
+      GTEST_LOCK_EXCLUDED_(mutex_);
+
+  // Returns the random seed used at the start of the current test run.
+  int random_seed() const;
+
+#if GTEST_HAS_PARAM_TEST
+  // Returns the ParameterizedTestCaseRegistry object used to keep track of
+  // value-parameterized tests and instantiate and register them.
+  //
+  // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+  internal::ParameterizedTestCaseRegistry& parameterized_test_registry()
+      GTEST_LOCK_EXCLUDED_(mutex_);
+#endif  // GTEST_HAS_PARAM_TEST
+
+  // Gets the number of successful test cases.
+  int successful_test_case_count() const;
+
+  // Gets the number of failed test cases.
+  int failed_test_case_count() const;
+
+  // Gets the number of all test cases.
+  int total_test_case_count() const;
+
+  // Gets the number of all test cases that contain at least one test
+  // that should run.
+  int test_case_to_run_count() const;
+
+  // Gets the number of successful tests.
+  int successful_test_count() const;
+
+  // Gets the number of failed tests.
+  int failed_test_count() const;
+
+  // Gets the number of disabled tests that will be reported in the XML report.
+  int reportable_disabled_test_count() const;
+
+  // Gets the number of disabled tests.
+  int disabled_test_count() const;
+
+  // Gets the number of tests to be printed in the XML report.
+  int reportable_test_count() const;
+
+  // Gets the number of all tests.
+  int total_test_count() const;
+
+  // Gets the number of tests that should run.
+  int test_to_run_count() const;
+
+  // Gets the time of the test program start, in ms from the start of the
+  // UNIX epoch.
+  TimeInMillis start_timestamp() const;
+
+  // Gets the elapsed time, in milliseconds.
+  TimeInMillis elapsed_time() const;
+
+  // Returns true iff the unit test passed (i.e. all test cases passed).
+  bool Passed() const;
+
+  // Returns true iff the unit test failed (i.e. some test case failed
+  // or something outside of all tests failed).
+  bool Failed() const;
+
+  // Gets the i-th test case among all the test cases. i can range from 0 to
+  // total_test_case_count() - 1. If i is not in that range, returns NULL.
+  const TestCase* GetTestCase(int i) const;
+
+  // Returns the TestResult containing information on test failures and
+  // properties logged outside of individual test cases.
+  const TestResult& ad_hoc_test_result() const;
+
+  // Returns the list of event listeners that can be used to track events
+  // inside Google Test.
+  TestEventListeners& listeners();
+
+ private:
+  // Registers and returns a global test environment.  When a test
+  // program is run, all global test environments will be set-up in
+  // the order they were registered.  After all tests in the program
+  // have finished, all global test environments will be torn-down in
+  // the *reverse* order they were registered.
+  //
+  // The UnitTest object takes ownership of the given environment.
+  //
+  // This method can only be called from the main thread.
+  Environment* AddEnvironment(Environment* env);
+
+  // Adds a TestPartResult to the current TestResult object.  All
+  // Google Test assertion macros (e.g. ASSERT_TRUE, EXPECT_EQ, etc)
+  // eventually call this to report their results.  The user code
+  // should use the assertion macros instead of calling this directly.
+  void AddTestPartResult(TestPartResult::Type result_type,
+                         const char* file_name,
+                         int line_number,
+                         const std::string& message,
+                         const std::string& os_stack_trace)
+      GTEST_LOCK_EXCLUDED_(mutex_);
+
+  // Adds a TestProperty to the current TestResult object when invoked from
+  // inside a test, to current TestCase's ad_hoc_test_result_ when invoked
+  // from SetUpTestCase or TearDownTestCase, or to the global property set
+  // when invoked elsewhere.  If the result already contains a property with
+  // the same key, the value will be updated.
+  void RecordProperty(const std::string& key, const std::string& value);
+
+  // Gets the i-th test case among all the test cases. i can range from 0 to
+  // total_test_case_count() - 1. If i is not in that range, returns NULL.
+  TestCase* GetMutableTestCase(int i);
+
+  // Accessors for the implementation object.
+  internal::UnitTestImpl* impl() { return impl_; }
+  const internal::UnitTestImpl* impl() const { return impl_; }
+
+  // These classes and functions are friends as they need to access private
+  // members of UnitTest.
+  friend class Test;
+  friend class internal::AssertHelper;
+  friend class internal::ScopedTrace;
+  friend class internal::StreamingListenerTest;
+  friend class internal::UnitTestRecordPropertyTestHelper;
+  friend Environment* AddGlobalTestEnvironment(Environment* env);
+  friend internal::UnitTestImpl* internal::GetUnitTestImpl();
+  friend void internal::ReportFailureInUnknownLocation(
+      TestPartResult::Type result_type,
+      const std::string& message);
+
+  // Creates an empty UnitTest.
+  UnitTest();
+
+  // D'tor
+  virtual ~UnitTest();
+
+  // Pushes a trace defined by SCOPED_TRACE() on to the per-thread
+  // Google Test trace stack.
+  void PushGTestTrace(const internal::TraceInfo& trace)
+      GTEST_LOCK_EXCLUDED_(mutex_);
+
+  // Pops a trace from the per-thread Google Test trace stack.
+  void PopGTestTrace()
+      GTEST_LOCK_EXCLUDED_(mutex_);
+
+  // Protects mutable state in *impl_.  This is mutable as some const
+  // methods need to lock it too.
+  mutable internal::Mutex mutex_;
+
+  // Opaque implementation object.  This field is never changed once
+  // the object is constructed.  We don't mark it as const here, as
+  // doing so will cause a warning in the constructor of UnitTest.
+  // Mutable state in *impl_ is protected by mutex_.
+  internal::UnitTestImpl* impl_;
+
+  // We disallow copying UnitTest.
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(UnitTest);
+};
+
+// A convenient wrapper for adding an environment for the test
+// program.
+//
+// You should call this before RUN_ALL_TESTS() is called, probably in
+// main().  If you use gtest_main, you need to call this before main()
+// starts for it to take effect.  For example, you can define a global
+// variable like this:
+//
+//   testing::Environment* const foo_env =
+//       testing::AddGlobalTestEnvironment(new FooEnvironment);
+//
+// However, we strongly recommend that you write your own main() and
+// call AddGlobalTestEnvironment() there, as relying on initialization
+// of global variables makes the code harder to read and may cause
+// problems when you register multiple environments from different
+// translation units and the environments have dependencies among them
+// (remember that the compiler doesn't guarantee the order in which
+// global variables from different translation units are initialized).
+inline Environment* AddGlobalTestEnvironment(Environment* env) {
+  return UnitTest::GetInstance()->AddEnvironment(env);
+}
+
+// Initializes Google Test.  This must be called before calling
+// RUN_ALL_TESTS().  In particular, it parses a command line for the
+// flags that Google Test recognizes.  Whenever a Google Test flag is
+// seen, it is removed from argv, and *argc is decremented.
+//
+// No value is returned.  Instead, the Google Test flag variables are
+// updated.
+//
+// Calling the function for the second time has no user-visible effect.
+GTEST_API_ void InitGoogleTest(int* argc, char** argv);
+
+// This overloaded version can be used in Windows programs compiled in
+// UNICODE mode.
+GTEST_API_ void InitGoogleTest(int* argc, wchar_t** argv);
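+
+// A minimal sketch of the recommended hand-written main() (FooEnvironment is
+// the made-up environment sketched earlier):
+//
+//   int main(int argc, char** argv) {
+//     ::testing::InitGoogleTest(&argc, argv);
+//     ::testing::AddGlobalTestEnvironment(new FooEnvironment);
+//     return RUN_ALL_TESTS();
+//   }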
+
+namespace internal {
+
+// Separate the error generating code from the code path to reduce the stack
+// frame size of CmpHelperEQ. This helps reduce the overhead of some sanitizers
+// when calling EXPECT_* in a tight loop.
+template <typename T1, typename T2>
+AssertionResult CmpHelperEQFailure(const char* lhs_expression,
+                                   const char* rhs_expression,
+                                   const T1& lhs, const T2& rhs) {
+  return EqFailure(lhs_expression,
+                   rhs_expression,
+                   FormatForComparisonFailureMessage(lhs, rhs),
+                   FormatForComparisonFailureMessage(rhs, lhs),
+                   false);
+}
+
+// The helper function for {ASSERT|EXPECT}_EQ.
+template <typename T1, typename T2>
+AssertionResult CmpHelperEQ(const char* lhs_expression,
+                            const char* rhs_expression,
+                            const T1& lhs,
+                            const T2& rhs) {
+GTEST_DISABLE_MSC_WARNINGS_PUSH_(4389 /* signed/unsigned mismatch */)
+  if (lhs == rhs) {
+    return AssertionSuccess();
+  }
+GTEST_DISABLE_MSC_WARNINGS_POP_()
+
+  return CmpHelperEQFailure(lhs_expression, rhs_expression, lhs, rhs);
+}
+
+// With this overloaded version, we allow anonymous enums to be used
+// in {ASSERT|EXPECT}_EQ when compiled with gcc 4, as anonymous enums
+// can be implicitly cast to BiggestInt.
+GTEST_API_ AssertionResult CmpHelperEQ(const char* lhs_expression,
+                                       const char* rhs_expression,
+                                       BiggestInt lhs,
+                                       BiggestInt rhs);
+
+// The helper class for {ASSERT|EXPECT}_EQ.  The template argument
+// lhs_is_null_literal is true iff the first argument to ASSERT_EQ()
+// is a null pointer literal.  The following default implementation is
+// for lhs_is_null_literal being false.
+template <bool lhs_is_null_literal>
+class EqHelper {
+ public:
+  // This templatized version is for the general case.
+  template <typename T1, typename T2>
+  static AssertionResult Compare(const char* lhs_expression,
+                                 const char* rhs_expression,
+                                 const T1& lhs,
+                                 const T2& rhs) {
+    return CmpHelperEQ(lhs_expression, rhs_expression, lhs, rhs);
+  }
+
+  // With this overloaded version, we allow anonymous enums to be used
+  // in {ASSERT|EXPECT}_EQ when compiled with gcc 4, as anonymous
+  // enums can be implicitly cast to BiggestInt.
+  //
+  // Even though its body looks the same as the above version, we
+  // cannot merge the two, as it will make anonymous enums unhappy.
+  static AssertionResult Compare(const char* lhs_expression,
+                                 const char* rhs_expression,
+                                 BiggestInt lhs,
+                                 BiggestInt rhs) {
+    return CmpHelperEQ(lhs_expression, rhs_expression, lhs, rhs);
+  }
+};
+
+// This specialization is used when the first argument to ASSERT_EQ()
+// is a null pointer literal, like NULL, false, or 0.
+template <>
+class EqHelper<true> {
+ public:
+  // We define two overloaded versions of Compare().  The first
+  // version will be picked when the second argument to ASSERT_EQ() is
+  // NOT a pointer, e.g. ASSERT_EQ(0, AnIntFunction()) or
+  // EXPECT_EQ(false, a_bool).
+  template <typename T1, typename T2>
+  static AssertionResult Compare(
+      const char* lhs_expression,
+      const char* rhs_expression,
+      const T1& lhs,
+      const T2& rhs,
+      // The following line prevents this overload from being considered if T2
+      // is not a pointer type.  We need this because ASSERT_EQ(NULL, my_ptr)
+      // expands to Compare("", "", NULL, my_ptr), which requires a conversion
+      // to match the Secret* in the other overload, which would otherwise make
+      // this template match better.
+      typename EnableIf<!is_pointer<T2>::value>::type* = 0) {
+    return CmpHelperEQ(lhs_expression, rhs_expression, lhs, rhs);
+  }
+
+  // This version will be picked when the second argument to ASSERT_EQ() is a
+  // pointer, e.g. ASSERT_EQ(NULL, a_pointer).
+  template <typename T>
+  static AssertionResult Compare(
+      const char* lhs_expression,
+      const char* rhs_expression,
+      // We used to have a second template parameter instead of Secret*.  That
+      // template parameter would deduce to 'long', making this a better match
+      // than the first overload even without the first overload's EnableIf.
+      // Unfortunately, gcc with -Wconversion-null warns when "passing NULL to
+      // non-pointer argument" (even a deduced integral argument), so the old
+      // implementation caused warnings in user code.
+      Secret* /* lhs (NULL) */,
+      T* rhs) {
+    // We already know that 'lhs' is a null pointer.
+    return CmpHelperEQ(lhs_expression, rhs_expression,
+                       static_cast<T*>(NULL), rhs);
+  }
+};
+
+// Separate the error generating code from the code path to reduce the stack
+// frame size of CmpHelperOP. This helps reduce the overhead of some sanitizers
+// when calling EXPECT_OP in a tight loop.
+template <typename T1, typename T2>
+AssertionResult CmpHelperOpFailure(const char* expr1, const char* expr2,
+                                   const T1& val1, const T2& val2,
+                                   const char* op) {
+  return AssertionFailure()
+         << "Expected: (" << expr1 << ") " << op << " (" << expr2
+         << "), actual: " << FormatForComparisonFailureMessage(val1, val2)
+         << " vs " << FormatForComparisonFailureMessage(val2, val1);
+}
+
+// A macro for implementing the helper functions needed to implement
+// ASSERT_?? and EXPECT_??.  It is here just to avoid copy-and-paste
+// of similar code.
+//
+// For each templatized helper function, we also define an overloaded
+// version for BiggestInt in order to reduce code bloat and allow
+// anonymous enums to be used with {ASSERT|EXPECT}_?? when compiled
+// with gcc 4.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+
+#define GTEST_IMPL_CMP_HELPER_(op_name, op)\
+template <typename T1, typename T2>\
+AssertionResult CmpHelper##op_name(const char* expr1, const char* expr2, \
+                                   const T1& val1, const T2& val2) {\
+  if (val1 op val2) {\
+    return AssertionSuccess();\
+  } else {\
+    return CmpHelperOpFailure(expr1, expr2, val1, val2, #op);\
+  }\
+}\
+GTEST_API_ AssertionResult CmpHelper##op_name(\
+    const char* expr1, const char* expr2, BiggestInt val1, BiggestInt val2)
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+
+// Implements the helper function for {ASSERT|EXPECT}_NE
+GTEST_IMPL_CMP_HELPER_(NE, !=);
+// Implements the helper function for {ASSERT|EXPECT}_LE
+GTEST_IMPL_CMP_HELPER_(LE, <=);
+// Implements the helper function for {ASSERT|EXPECT}_LT
+GTEST_IMPL_CMP_HELPER_(LT, <);
+// Implements the helper function for {ASSERT|EXPECT}_GE
+GTEST_IMPL_CMP_HELPER_(GE, >=);
+// Implements the helper function for {ASSERT|EXPECT}_GT
+GTEST_IMPL_CMP_HELPER_(GT, >);
+
+#undef GTEST_IMPL_CMP_HELPER_
+
+// The helper function for {ASSERT|EXPECT}_STREQ.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTREQ(const char* s1_expression,
+                                          const char* s2_expression,
+                                          const char* s1,
+                                          const char* s2);
+
+// The helper function for {ASSERT|EXPECT}_STRCASEEQ.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTRCASEEQ(const char* s1_expression,
+                                              const char* s2_expression,
+                                              const char* s1,
+                                              const char* s2);
+
+// The helper function for {ASSERT|EXPECT}_STRNE.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTRNE(const char* s1_expression,
+                                          const char* s2_expression,
+                                          const char* s1,
+                                          const char* s2);
+
+// The helper function for {ASSERT|EXPECT}_STRCASENE.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTRCASENE(const char* s1_expression,
+                                              const char* s2_expression,
+                                              const char* s1,
+                                              const char* s2);
+
+
+// Helper function for *_STREQ on wide strings.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTREQ(const char* s1_expression,
+                                          const char* s2_expression,
+                                          const wchar_t* s1,
+                                          const wchar_t* s2);
+
+// Helper function for *_STRNE on wide strings.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTRNE(const char* s1_expression,
+                                          const char* s2_expression,
+                                          const wchar_t* s1,
+                                          const wchar_t* s2);
+
+}  // namespace internal
+
+// IsSubstring() and IsNotSubstring() are intended to be used as the
+// first argument to {EXPECT,ASSERT}_PRED_FORMAT2(), not by
+// themselves.  They check whether needle is a substring of haystack
+// (NULL is considered a substring of itself only), and return an
+// appropriate error message when they fail.
+//
+// The {needle,haystack}_expr arguments are the stringified
+// expressions that generated the two real arguments.
+GTEST_API_ AssertionResult IsSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const char* needle, const char* haystack);
+GTEST_API_ AssertionResult IsSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const wchar_t* needle, const wchar_t* haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const char* needle, const char* haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const wchar_t* needle, const wchar_t* haystack);
+GTEST_API_ AssertionResult IsSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const ::std::string& needle, const ::std::string& haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const ::std::string& needle, const ::std::string& haystack);
+
+#if GTEST_HAS_STD_WSTRING
+GTEST_API_ AssertionResult IsSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const ::std::wstring& needle, const ::std::wstring& haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const ::std::wstring& needle, const ::std::wstring& haystack);
+#endif  // GTEST_HAS_STD_WSTRING
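+
+// A minimal usage sketch: these predicate-formatters are meant to be passed
+// as the first argument of the *_PRED_FORMAT2 macros (haystack and output
+// are made-up variable names):
+//
+//   EXPECT_PRED_FORMAT2(::testing::IsSubstring, "needle", haystack);
+//   ASSERT_PRED_FORMAT2(::testing::IsNotSubstring, "forbidden", output);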
+
+namespace internal {
+
+// Helper template function for comparing floating-points.
+//
+// Template parameter:
+//
+//   RawType: the raw floating-point type (either float or double)
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+template <typename RawType>
+AssertionResult CmpHelperFloatingPointEQ(const char* lhs_expression,
+                                         const char* rhs_expression,
+                                         RawType lhs_value,
+                                         RawType rhs_value) {
+  const FloatingPoint<RawType> lhs(lhs_value), rhs(rhs_value);
+
+  if (lhs.AlmostEquals(rhs)) {
+    return AssertionSuccess();
+  }
+
+  ::std::stringstream lhs_ss;
+  lhs_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
+         << lhs_value;
+
+  ::std::stringstream rhs_ss;
+  rhs_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
+         << rhs_value;
+
+  return EqFailure(lhs_expression,
+                   rhs_expression,
+                   StringStreamToString(&lhs_ss),
+                   StringStreamToString(&rhs_ss),
+                   false);
+}
+
+// Helper function for implementing ASSERT_NEAR.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult DoubleNearPredFormat(const char* expr1,
+                                                const char* expr2,
+                                                const char* abs_error_expr,
+                                                double val1,
+                                                double val2,
+                                                double abs_error);
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+// A class that enables one to stream messages to assertion macros
+class GTEST_API_ AssertHelper {
+ public:
+  // Constructor.
+  AssertHelper(TestPartResult::Type type,
+               const char* file,
+               int line,
+               const char* message);
+  ~AssertHelper();
+
+  // Message assignment is a semantic trick to enable assertion
+  // streaming; see the GTEST_MESSAGE_ macro below.
+  void operator=(const Message& message) const;
+
+ private:
+  // We put our data in a struct so that the size of the AssertHelper class can
+  // be as small as possible.  This is important because gcc is incapable of
+  // re-using stack space even for temporary variables, so every EXPECT_EQ
+  // reserves stack space for another AssertHelper.
+  struct AssertHelperData {
+    AssertHelperData(TestPartResult::Type t,
+                     const char* srcfile,
+                     int line_num,
+                     const char* msg)
+        : type(t), file(srcfile), line(line_num), message(msg) { }
+
+    TestPartResult::Type const type;
+    const char* const file;
+    int const line;
+    std::string const message;
+
+   private:
+    GTEST_DISALLOW_COPY_AND_ASSIGN_(AssertHelperData);
+  };
+
+  AssertHelperData* const data_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(AssertHelper);
+};
+
+}  // namespace internal
+
+#if GTEST_HAS_PARAM_TEST
+// The pure interface class that all value-parameterized tests inherit from.
+// A value-parameterized class must inherit from both ::testing::Test and
+// ::testing::WithParamInterface. In most cases that just means inheriting
+// from ::testing::TestWithParam, but more complicated test hierarchies
+// may need to inherit from Test and WithParamInterface at different levels.
+//
+// This interface has support for accessing the test parameter value via
+// the GetParam() method.
+//
+// Use it with one of the parameter generator defining functions, like Range(),
+// Values(), ValuesIn(), Bool(), and Combine().
+//
+// class FooTest : public ::testing::TestWithParam<int> {
+//  protected:
+//   FooTest() {
+//     // Can use GetParam() here.
+//   }
+//   virtual ~FooTest() {
+//     // Can use GetParam() here.
+//   }
+//   virtual void SetUp() {
+//     // Can use GetParam() here.
+//   }
+//   virtual void TearDown() {
+//     // Can use GetParam() here.
+//   }
+// };
+// TEST_P(FooTest, DoesBar) {
+//   // Can use GetParam() method here.
+//   Foo foo;
+//   ASSERT_TRUE(foo.DoesBar(GetParam()));
+// }
+// INSTANTIATE_TEST_CASE_P(OneToTenRange, FooTest, ::testing::Range(1, 10));
+
+template <typename T>
+class WithParamInterface {
+ public:
+  typedef T ParamType;
+  virtual ~WithParamInterface() {}
+
+  // The current parameter value.  It is also available in the test
+  // fixture's constructor.  This member function is non-static, even
+  // though it only references static data, to reduce the opportunity for
+  // incorrect uses
+  // like writing 'WithParamInterface<bool>::GetParam()' for a test that
+  // uses a fixture whose parameter type is int.
+  const ParamType& GetParam() const {
+    GTEST_CHECK_(parameter_ != NULL)
+        << "GetParam() can only be called inside a value-parameterized test "
+        << "-- did you intend to write TEST_P instead of TEST_F?";
+    return *parameter_;
+  }
+
+ private:
+  // Sets parameter value. The caller is responsible for making sure the value
+  // remains alive and unchanged throughout the current test.
+  static void SetParam(const ParamType* parameter) {
+    parameter_ = parameter;
+  }
+
+  // Static value used for accessing parameter during a test lifetime.
+  static const ParamType* parameter_;
+
+  // TestClass must be a subclass of WithParamInterface<T> and Test.
+  template <class TestClass> friend class internal::ParameterizedTestFactory;
+};
+
+template <typename T>
+const T* WithParamInterface<T>::parameter_ = NULL;
+
+// Most value-parameterized classes can ignore the existence of
+// WithParamInterface, and can just inherit from ::testing::TestWithParam.
+
+template <typename T>
+class TestWithParam : public Test, public WithParamInterface<T> {
+};
+
+#endif  // GTEST_HAS_PARAM_TEST
+
+// Macros for indicating success/failure in test code.
+
+// ADD_FAILURE unconditionally adds a failure to the current test.
+// SUCCEED generates a success - it doesn't automatically make the
+// current test successful, as a test is only successful when it has
+// no failure.
+//
+// EXPECT_* verifies that a certain condition is satisfied.  If not,
+// it behaves like ADD_FAILURE.  In particular:
+//
+//   EXPECT_TRUE  verifies that a Boolean condition is true.
+//   EXPECT_FALSE verifies that a Boolean condition is false.
+//
+// FAIL and ASSERT_* are similar to ADD_FAILURE and EXPECT_*, except
+// that they will also abort the current function on failure.  People
+// usually want the fail-fast behavior of FAIL and ASSERT_*, but those
+// writing data-driven tests often find themselves using ADD_FAILURE
+// and EXPECT_* more.
+
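+// A minimal sketch of the difference in control flow (the test body below is
+// a made-up example):
+//
+//   TEST(MyVectorTest, StartsEmpty) {
+//     std::vector<int> v;
+//     EXPECT_EQ(0u, v.size());  // On failure: records it and continues.
+//     ASSERT_TRUE(v.empty());   // On failure: records it and returns.
+//     v.push_back(1);           // Not reached if the ASSERT above failed.
+//   }
+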
+// Generates a nonfatal failure with a generic message.
+#define ADD_FAILURE() GTEST_NONFATAL_FAILURE_("Failed")
+
+// Generates a nonfatal failure at the given source file location with
+// a generic message.
+#define ADD_FAILURE_AT(file, line) \
+  GTEST_MESSAGE_AT_(file, line, "Failed", \
+                    ::testing::TestPartResult::kNonFatalFailure)
+
+// Generates a fatal failure with a generic message.
+#define GTEST_FAIL() GTEST_FATAL_FAILURE_("Failed")
+
+// Define this macro to 1 to omit the definition of FAIL(), which is a
+// generic name and clashes with some other libraries.
+#if !GTEST_DONT_DEFINE_FAIL
+# define FAIL() GTEST_FAIL()
+#endif
+
+// Generates a success with a generic message.
+#define GTEST_SUCCEED() GTEST_SUCCESS_("Succeeded")
+
+// Define this macro to 1 to omit the definition of SUCCEED(), which
+// is a generic name and clashes with some other libraries.
+#if !GTEST_DONT_DEFINE_SUCCEED
+# define SUCCEED() GTEST_SUCCEED()
+#endif
+
+// Macros for testing exceptions.
+//
+//    * {ASSERT|EXPECT}_THROW(statement, expected_exception):
+//         Tests that the statement throws the expected exception.
+//    * {ASSERT|EXPECT}_NO_THROW(statement):
+//         Tests that the statement doesn't throw any exception.
+//    * {ASSERT|EXPECT}_ANY_THROW(statement):
+//         Tests that the statement throws an exception.
+
+#define EXPECT_THROW(statement, expected_exception) \
+  GTEST_TEST_THROW_(statement, expected_exception, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_NO_THROW(statement) \
+  GTEST_TEST_NO_THROW_(statement, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_ANY_THROW(statement) \
+  GTEST_TEST_ANY_THROW_(statement, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_THROW(statement, expected_exception) \
+  GTEST_TEST_THROW_(statement, expected_exception, GTEST_FATAL_FAILURE_)
+#define ASSERT_NO_THROW(statement) \
+  GTEST_TEST_NO_THROW_(statement, GTEST_FATAL_FAILURE_)
+#define ASSERT_ANY_THROW(statement) \
+  GTEST_TEST_ANY_THROW_(statement, GTEST_FATAL_FAILURE_)
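+
+// A minimal usage sketch (ThrowingFunc and SafeFunc are made-up functions;
+// ThrowingFunc is expected to throw std::out_of_range):
+//
+//   EXPECT_THROW(ThrowingFunc(), std::out_of_range);
+//   EXPECT_NO_THROW(SafeFunc());
+//   ASSERT_ANY_THROW(ThrowingFunc());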
+
+// Boolean assertions. Condition can be either a Boolean expression or an
+// AssertionResult. For more information on how to use AssertionResult with
+// these macros see comments on that class.
+#define EXPECT_TRUE(condition) \
+  GTEST_TEST_BOOLEAN_((condition), #condition, false, true, \
+                      GTEST_NONFATAL_FAILURE_)
+#define EXPECT_FALSE(condition) \
+  GTEST_TEST_BOOLEAN_(!(condition), #condition, true, false, \
+                      GTEST_NONFATAL_FAILURE_)
+#define ASSERT_TRUE(condition) \
+  GTEST_TEST_BOOLEAN_((condition), #condition, false, true, \
+                      GTEST_FATAL_FAILURE_)
+#define ASSERT_FALSE(condition) \
+  GTEST_TEST_BOOLEAN_(!(condition), #condition, true, false, \
+                      GTEST_FATAL_FAILURE_)
+
+// Includes the auto-generated header that implements a family of
+// generic predicate assertion macros.
+// Copyright 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is AUTOMATICALLY GENERATED on 10/31/2011 by command
+// 'gen_gtest_pred_impl.py 5'.  DO NOT EDIT BY HAND!
+//
+// Implements a family of generic predicate assertion macros.
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
+#define GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
+
+// Makes sure this header is not included before gtest.h.
+#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
+# error Do not include gtest_pred_impl.h directly.  Include gtest.h instead.
+#endif  // GTEST_INCLUDE_GTEST_GTEST_H_
+
+// This header implements a family of generic predicate assertion
+// macros:
+//
+//   ASSERT_PRED_FORMAT1(pred_format, v1)
+//   ASSERT_PRED_FORMAT2(pred_format, v1, v2)
+//   ...
+//
+// where pred_format is a function or functor that takes n (in the
+// case of ASSERT_PRED_FORMATn) values and their source expression
+// text, and returns a testing::AssertionResult.  See the definition
+// of ASSERT_EQ in gtest.h for an example.
+//
+// If you don't care about formatting, you can use the more
+// restrictive version:
+//
+//   ASSERT_PRED1(pred, v1)
+//   ASSERT_PRED2(pred, v1, v2)
+//   ...
+//
+// where pred is an n-ary function or functor that returns bool,
+// and the values v1, v2, ..., must support the << operator for
+// streaming to std::ostream.
+//
+// We also define the EXPECT_* variations.
+//
+// For now we only support predicates whose arity is at most 5.
+// Please email googletestframework@googlegroups.com if you need
+// support for higher arities.
+
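+// A minimal sketch of a predicate and a matching predicate-formatter
+// (MutuallyPrime and AssertMutuallyPrime are made-up names):
+//
+//   // Returns true iff m and n have no common divisor other than 1.
+//   bool MutuallyPrime(int m, int n);
+//
+//   ::testing::AssertionResult AssertMutuallyPrime(const char* m_expr,
+//                                                  const char* n_expr,
+//                                                  int m, int n) {
+//     if (MutuallyPrime(m, n)) return ::testing::AssertionSuccess();
+//     return ::testing::AssertionFailure()
+//         << m_expr << " and " << n_expr << " (" << m << " and " << n
+//         << ") are not mutually prime";
+//   }
+//
+//   EXPECT_PRED2(MutuallyPrime, 3, 4);                // Succeeds.
+//   EXPECT_PRED_FORMAT2(AssertMutuallyPrime, 4, 10);  // Fails with a
+//                                                     // readable message.
+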
+// GTEST_ASSERT_ is the basic statement to which all of the assertions
+// in this file reduce.  Don't use this in your code.
+
+#define GTEST_ASSERT_(expression, on_failure) \
+  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+  if (const ::testing::AssertionResult gtest_ar = (expression)) \
+    ; \
+  else \
+    on_failure(gtest_ar.failure_message())
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED1.  Don't use
+// this in your code.
+template <typename Pred,
+          typename T1>
+AssertionResult AssertPred1Helper(const char* pred_text,
+                                  const char* e1,
+                                  Pred pred,
+                                  const T1& v1) {
+  if (pred(v1)) return AssertionSuccess();
+
+  return AssertionFailure() << pred_text << "("
+                            << e1 << ") evaluates to false, where"
+                            << "\n" << e1 << " evaluates to " << v1;
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT1.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT1_(pred_format, v1, on_failure)\
+  GTEST_ASSERT_(pred_format(#v1, v1), \
+                on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED1.  Don't use
+// this in your code.
+#define GTEST_PRED1_(pred, v1, on_failure)\
+  GTEST_ASSERT_(::testing::AssertPred1Helper(#pred, \
+                                             #v1, \
+                                             pred, \
+                                             v1), on_failure)
+
+// Unary predicate assertion macros.
+#define EXPECT_PRED_FORMAT1(pred_format, v1) \
+  GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED1(pred, v1) \
+  GTEST_PRED1_(pred, v1, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT1(pred_format, v1) \
+  GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED1(pred, v1) \
+  GTEST_PRED1_(pred, v1, GTEST_FATAL_FAILURE_)
+
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED2.  Don't use
+// this in your code.
+template <typename Pred,
+          typename T1,
+          typename T2>
+AssertionResult AssertPred2Helper(const char* pred_text,
+                                  const char* e1,
+                                  const char* e2,
+                                  Pred pred,
+                                  const T1& v1,
+                                  const T2& v2) {
+  if (pred(v1, v2)) return AssertionSuccess();
+
+  return AssertionFailure() << pred_text << "("
+                            << e1 << ", "
+                            << e2 << ") evaluates to false, where"
+                            << "\n" << e1 << " evaluates to " << v1
+                            << "\n" << e2 << " evaluates to " << v2;
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT2.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT2_(pred_format, v1, v2, on_failure)\
+  GTEST_ASSERT_(pred_format(#v1, #v2, v1, v2), \
+                on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED2.  Don't use
+// this in your code.
+#define GTEST_PRED2_(pred, v1, v2, on_failure)\
+  GTEST_ASSERT_(::testing::AssertPred2Helper(#pred, \
+                                             #v1, \
+                                             #v2, \
+                                             pred, \
+                                             v1, \
+                                             v2), on_failure)
+
+// Binary predicate assertion macros.
+#define EXPECT_PRED_FORMAT2(pred_format, v1, v2) \
+  GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED2(pred, v1, v2) \
+  GTEST_PRED2_(pred, v1, v2, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT2(pred_format, v1, v2) \
+  GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED2(pred, v1, v2) \
+  GTEST_PRED2_(pred, v1, v2, GTEST_FATAL_FAILURE_)
+
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED3.  Don't use
+// this in your code.
+template <typename Pred,
+          typename T1,
+          typename T2,
+          typename T3>
+AssertionResult AssertPred3Helper(const char* pred_text,
+                                  const char* e1,
+                                  const char* e2,
+                                  const char* e3,
+                                  Pred pred,
+                                  const T1& v1,
+                                  const T2& v2,
+                                  const T3& v3) {
+  if (pred(v1, v2, v3)) return AssertionSuccess();
+
+  return AssertionFailure() << pred_text << "("
+                            << e1 << ", "
+                            << e2 << ", "
+                            << e3 << ") evaluates to false, where"
+                            << "\n" << e1 << " evaluates to " << v1
+                            << "\n" << e2 << " evaluates to " << v2
+                            << "\n" << e3 << " evaluates to " << v3;
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT3.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, on_failure)\
+  GTEST_ASSERT_(pred_format(#v1, #v2, #v3, v1, v2, v3), \
+                on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED3.  Don't use
+// this in your code.
+#define GTEST_PRED3_(pred, v1, v2, v3, on_failure)\
+  GTEST_ASSERT_(::testing::AssertPred3Helper(#pred, \
+                                             #v1, \
+                                             #v2, \
+                                             #v3, \
+                                             pred, \
+                                             v1, \
+                                             v2, \
+                                             v3), on_failure)
+
+// Ternary predicate assertion macros.
+#define EXPECT_PRED_FORMAT3(pred_format, v1, v2, v3) \
+  GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED3(pred, v1, v2, v3) \
+  GTEST_PRED3_(pred, v1, v2, v3, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT3(pred_format, v1, v2, v3) \
+  GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED3(pred, v1, v2, v3) \
+  GTEST_PRED3_(pred, v1, v2, v3, GTEST_FATAL_FAILURE_)
+
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED4.  Don't use
+// this in your code.
+template <typename Pred,
+          typename T1,
+          typename T2,
+          typename T3,
+          typename T4>
+AssertionResult AssertPred4Helper(const char* pred_text,
+                                  const char* e1,
+                                  const char* e2,
+                                  const char* e3,
+                                  const char* e4,
+                                  Pred pred,
+                                  const T1& v1,
+                                  const T2& v2,
+                                  const T3& v3,
+                                  const T4& v4) {
+  if (pred(v1, v2, v3, v4)) return AssertionSuccess();
+
+  return AssertionFailure() << pred_text << "("
+                            << e1 << ", "
+                            << e2 << ", "
+                            << e3 << ", "
+                            << e4 << ") evaluates to false, where"
+                            << "\n" << e1 << " evaluates to " << v1
+                            << "\n" << e2 << " evaluates to " << v2
+                            << "\n" << e3 << " evaluates to " << v3
+                            << "\n" << e4 << " evaluates to " << v4;
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT4.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, on_failure)\
+  GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, v1, v2, v3, v4), \
+                on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED4.  Don't use
+// this in your code.
+#define GTEST_PRED4_(pred, v1, v2, v3, v4, on_failure)\
+  GTEST_ASSERT_(::testing::AssertPred4Helper(#pred, \
+                                             #v1, \
+                                             #v2, \
+                                             #v3, \
+                                             #v4, \
+                                             pred, \
+                                             v1, \
+                                             v2, \
+                                             v3, \
+                                             v4), on_failure)
+
+// 4-ary predicate assertion macros.
+#define EXPECT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \
+  GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED4(pred, v1, v2, v3, v4) \
+  GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \
+  GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED4(pred, v1, v2, v3, v4) \
+  GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_FATAL_FAILURE_)
+
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED5.  Don't use
+// this in your code.
+template <typename Pred,
+          typename T1,
+          typename T2,
+          typename T3,
+          typename T4,
+          typename T5>
+AssertionResult AssertPred5Helper(const char* pred_text,
+                                  const char* e1,
+                                  const char* e2,
+                                  const char* e3,
+                                  const char* e4,
+                                  const char* e5,
+                                  Pred pred,
+                                  const T1& v1,
+                                  const T2& v2,
+                                  const T3& v3,
+                                  const T4& v4,
+                                  const T5& v5) {
+  if (pred(v1, v2, v3, v4, v5)) return AssertionSuccess();
+
+  return AssertionFailure() << pred_text << "("
+                            << e1 << ", "
+                            << e2 << ", "
+                            << e3 << ", "
+                            << e4 << ", "
+                            << e5 << ") evaluates to false, where"
+                            << "\n" << e1 << " evaluates to " << v1
+                            << "\n" << e2 << " evaluates to " << v2
+                            << "\n" << e3 << " evaluates to " << v3
+                            << "\n" << e4 << " evaluates to " << v4
+                            << "\n" << e5 << " evaluates to " << v5;
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT5.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, on_failure)\
+  GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, #v5, v1, v2, v3, v4, v5), \
+                on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED5.  Don't use
+// this in your code.
+#define GTEST_PRED5_(pred, v1, v2, v3, v4, v5, on_failure)\
+  GTEST_ASSERT_(::testing::AssertPred5Helper(#pred, \
+                                             #v1, \
+                                             #v2, \
+                                             #v3, \
+                                             #v4, \
+                                             #v5, \
+                                             pred, \
+                                             v1, \
+                                             v2, \
+                                             v3, \
+                                             v4, \
+                                             v5), on_failure)
+
+// 5-ary predicate assertion macros.
+#define EXPECT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \
+  GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED5(pred, v1, v2, v3, v4, v5) \
+  GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \
+  GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED5(pred, v1, v2, v3, v4, v5) \
+  GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_)
+
+
+
+#endif  // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
+
+// Macros for testing equalities and inequalities.
+//
+//    * {ASSERT|EXPECT}_EQ(v1, v2): Tests that v1 == v2
+//    * {ASSERT|EXPECT}_NE(v1, v2): Tests that v1 != v2
+//    * {ASSERT|EXPECT}_LT(v1, v2): Tests that v1 < v2
+//    * {ASSERT|EXPECT}_LE(v1, v2): Tests that v1 <= v2
+//    * {ASSERT|EXPECT}_GT(v1, v2): Tests that v1 > v2
+//    * {ASSERT|EXPECT}_GE(v1, v2): Tests that v1 >= v2
+//
+// When they are not, Google Test prints both the tested expressions and
+// their actual values.  The values must be compatible built-in types,
+// or you will get a compiler error.  By "compatible" we mean that the
+// values can be compared by the respective operator.
+//
+// Note:
+//
+//   1. It is possible to make a user-defined type work with
+//   {ASSERT|EXPECT}_??(), but that requires overloading the
+//   comparison operators and is thus discouraged by the Google C++
+//   Usage Guide.  Therefore, you are advised to use the
+//   {ASSERT|EXPECT}_TRUE() macro to assert that two objects are
+//   equal.
+//
+//   2. The {ASSERT|EXPECT}_??() macros do pointer comparisons on
+//   pointers (in particular, C strings).  Therefore, if you use it
+//   with two C strings, you are testing how their locations in memory
+//   are related, not how their content is related.  To compare two C
+//   strings by content, use {ASSERT|EXPECT}_STR*().
+//
+//   3. {ASSERT|EXPECT}_EQ(v1, v2) is preferred to
+//   {ASSERT|EXPECT}_TRUE(v1 == v2), as the former tells you
+//   what the actual value is when it fails, and similarly for the
+//   other comparisons.
+//
+//   4. Do not depend on the order in which {ASSERT|EXPECT}_??()
+//   evaluate their arguments, which is undefined.
+//
+//   5. These macros evaluate their arguments exactly once.
+//
+// Examples:
+//
+//   EXPECT_NE(5, Foo());
+//   EXPECT_EQ(NULL, a_pointer);
+//   ASSERT_LT(i, array_size);
+//   ASSERT_GT(records.size(), 0) << "There is no record left.";
+
+#define EXPECT_EQ(val1, val2) \
+  EXPECT_PRED_FORMAT2(::testing::internal:: \
+                      EqHelper<GTEST_IS_NULL_LITERAL_(val1)>::Compare, \
+                      val1, val2)
+#define EXPECT_NE(val1, val2) \
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperNE, val1, val2)
+#define EXPECT_LE(val1, val2) \
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperLE, val1, val2)
+#define EXPECT_LT(val1, val2) \
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperLT, val1, val2)
+#define EXPECT_GE(val1, val2) \
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperGE, val1, val2)
+#define EXPECT_GT(val1, val2) \
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperGT, val1, val2)
+
+#define GTEST_ASSERT_EQ(val1, val2) \
+  ASSERT_PRED_FORMAT2(::testing::internal:: \
+                      EqHelper<GTEST_IS_NULL_LITERAL_(val1)>::Compare, \
+                      val1, val2)
+#define GTEST_ASSERT_NE(val1, val2) \
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperNE, val1, val2)
+#define GTEST_ASSERT_LE(val1, val2) \
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperLE, val1, val2)
+#define GTEST_ASSERT_LT(val1, val2) \
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperLT, val1, val2)
+#define GTEST_ASSERT_GE(val1, val2) \
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperGE, val1, val2)
+#define GTEST_ASSERT_GT(val1, val2) \
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperGT, val1, val2)
+
+// Define macro GTEST_DONT_DEFINE_ASSERT_XY to 1 to omit the definition of
+// ASSERT_XY(), which clashes with some users' own code.
+
+#if !GTEST_DONT_DEFINE_ASSERT_EQ
+# define ASSERT_EQ(val1, val2) GTEST_ASSERT_EQ(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_NE
+# define ASSERT_NE(val1, val2) GTEST_ASSERT_NE(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_LE
+# define ASSERT_LE(val1, val2) GTEST_ASSERT_LE(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_LT
+# define ASSERT_LT(val1, val2) GTEST_ASSERT_LT(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_GE
+# define ASSERT_GE(val1, val2) GTEST_ASSERT_GE(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_GT
+# define ASSERT_GT(val1, val2) GTEST_ASSERT_GT(val1, val2)
+#endif
+
+// C-string Comparisons.  All tests treat NULL and any non-NULL string
+// as different.  Two NULLs are equal.
+//
+//    * {ASSERT|EXPECT}_STREQ(s1, s2):     Tests that s1 == s2
+//    * {ASSERT|EXPECT}_STRNE(s1, s2):     Tests that s1 != s2
+//    * {ASSERT|EXPECT}_STRCASEEQ(s1, s2): Tests that s1 == s2, ignoring case
+//    * {ASSERT|EXPECT}_STRCASENE(s1, s2): Tests that s1 != s2, ignoring case
+//
+// For wide or narrow string objects, you can use the
+// {ASSERT|EXPECT}_??() macros.
+//
+// Don't depend on the order in which the arguments are evaluated,
+// which is undefined.
+//
+// These macros evaluate their arguments exactly once.
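+//
+// Example usage (illustrative; value is a hypothetical std::string):
+//
+//   EXPECT_STREQ("hello", value.c_str());
+//   EXPECT_STRCASEEQ("Hello", "HELLO");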
+
+#define EXPECT_STREQ(s1, s2) \
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTREQ, s1, s2)
+#define EXPECT_STRNE(s1, s2) \
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRNE, s1, s2)
+#define EXPECT_STRCASEEQ(s1, s2) \
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASEEQ, s1, s2)
+#define EXPECT_STRCASENE(s1, s2)\
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASENE, s1, s2)
+
+#define ASSERT_STREQ(s1, s2) \
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTREQ, s1, s2)
+#define ASSERT_STRNE(s1, s2) \
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRNE, s1, s2)
+#define ASSERT_STRCASEEQ(s1, s2) \
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASEEQ, s1, s2)
+#define ASSERT_STRCASENE(s1, s2)\
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASENE, s1, s2)
+
+// Macros for comparing floating-point numbers.
+//
+//    * {ASSERT|EXPECT}_FLOAT_EQ(val1, val2):
+//         Tests that two float values are almost equal.
+//    * {ASSERT|EXPECT}_DOUBLE_EQ(val1, val2):
+//         Tests that two double values are almost equal.
+//    * {ASSERT|EXPECT}_NEAR(v1, v2, abs_error):
+//         Tests that v1 and v2 are within the given distance of each other.
+//
+// Google Test uses ULP-based comparison to automatically pick a default
+// error bound that is appropriate for the operands.  See the
+// FloatingPoint template class in gtest-internal.h if you are
+// interested in the implementation details.
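+//
+// Example usage (illustrative; Foo() is a hypothetical function under test):
+//
+//   EXPECT_DOUBLE_EQ(expected, Foo());      // ULP-based error bound.
+//   EXPECT_NEAR(expected, Foo(), 1e-9);     // Explicit absolute error bound.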
+
+#define EXPECT_FLOAT_EQ(val1, val2)\
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<float>, \
+                      val1, val2)
+
+#define EXPECT_DOUBLE_EQ(val1, val2)\
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<double>, \
+                      val1, val2)
+
+#define ASSERT_FLOAT_EQ(val1, val2)\
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<float>, \
+                      val1, val2)
+
+#define ASSERT_DOUBLE_EQ(val1, val2)\
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<double>, \
+                      val1, val2)
+
+#define EXPECT_NEAR(val1, val2, abs_error)\
+  EXPECT_PRED_FORMAT3(::testing::internal::DoubleNearPredFormat, \
+                      val1, val2, abs_error)
+
+#define ASSERT_NEAR(val1, val2, abs_error)\
+  ASSERT_PRED_FORMAT3(::testing::internal::DoubleNearPredFormat, \
+                      val1, val2, abs_error)
+
+// These predicate format functions work on floating-point values, and
+// can be used in {ASSERT|EXPECT}_PRED_FORMAT2*(), e.g.
+//
+//   EXPECT_PRED_FORMAT2(testing::DoubleLE, Foo(), 5.0);
+
+// Asserts that val1 is less than, or almost equal to, val2.  Fails
+// otherwise.  In particular, it fails if either val1 or val2 is NaN.
+GTEST_API_ AssertionResult FloatLE(const char* expr1, const char* expr2,
+                                   float val1, float val2);
+GTEST_API_ AssertionResult DoubleLE(const char* expr1, const char* expr2,
+                                    double val1, double val2);
+
+
+#if GTEST_OS_WINDOWS
+
+// Macros that test for HRESULT failure and success. These are only useful
+// on Windows and rely on Windows SDK macros and APIs to compile.
+//
+//    * {ASSERT|EXPECT}_HRESULT_{SUCCEEDED|FAILED}(expr)
+//
+// When expr unexpectedly fails or succeeds, Google Test prints the
+// expected result and the actual result with both a human-readable
+// string representation of the error, if available, as well as the
+// hex result code.
+# define EXPECT_HRESULT_SUCCEEDED(expr) \
+    EXPECT_PRED_FORMAT1(::testing::internal::IsHRESULTSuccess, (expr))
+
+# define ASSERT_HRESULT_SUCCEEDED(expr) \
+    ASSERT_PRED_FORMAT1(::testing::internal::IsHRESULTSuccess, (expr))
+
+# define EXPECT_HRESULT_FAILED(expr) \
+    EXPECT_PRED_FORMAT1(::testing::internal::IsHRESULTFailure, (expr))
+
+# define ASSERT_HRESULT_FAILED(expr) \
+    ASSERT_PRED_FORMAT1(::testing::internal::IsHRESULTFailure, (expr))
+
+#endif  // GTEST_OS_WINDOWS
+
+// Macros that execute statement and check that it doesn't generate new fatal
+// failures in the current thread.
+//
+//   * {ASSERT|EXPECT}_NO_FATAL_FAILURE(statement);
+//
+// Examples:
+//
+//   EXPECT_NO_FATAL_FAILURE(Process());
+//   ASSERT_NO_FATAL_FAILURE(Process()) << "Process() failed";
+//
+#define ASSERT_NO_FATAL_FAILURE(statement) \
+    GTEST_TEST_NO_FATAL_FAILURE_(statement, GTEST_FATAL_FAILURE_)
+#define EXPECT_NO_FATAL_FAILURE(statement) \
+    GTEST_TEST_NO_FATAL_FAILURE_(statement, GTEST_NONFATAL_FAILURE_)
+
+// Causes a trace (including the source file path, the current line
+// number, and the given message) to be included in every test failure
+// message generated by code in the current scope.  The effect is
+// undone when the control leaves the current scope.
+//
+// The message argument can be anything streamable to std::ostream.
+//
+// In the implementation, we include the current line number as part
+// of the dummy variable name, thus allowing multiple SCOPED_TRACE()s
+// to appear in the same block - as long as they are on different
+// lines.
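+//
+// Example (illustrative sketch):
+//
+//   void CheckValues(const std::vector<int>& v) {
+//     SCOPED_TRACE("CheckValues");  // Added to every failure message below.
+//     EXPECT_EQ(3u, v.size());
+//   }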
+#define SCOPED_TRACE(message) \
+  ::testing::internal::ScopedTrace GTEST_CONCAT_TOKEN_(gtest_trace_, __LINE__)(\
+    __FILE__, __LINE__, ::testing::Message() << (message))
+
+// Compile-time assertion for type equality.
+// StaticAssertTypeEq<type1, type2>() compiles iff type1 and type2 are
+// the same type.  The value it returns is not interesting.
+//
+// Instead of making StaticAssertTypeEq a class template, we make it a
+// function template that invokes a helper class template.  This
+// prevents a user from misusing StaticAssertTypeEq<T1, T2> by
+// defining objects of that type.
+//
+// CAVEAT:
+//
+// When used inside a method of a class template,
+// StaticAssertTypeEq<T1, T2>() is effective ONLY IF the method is
+// instantiated.  For example, given:
+//
+//   template <typename T> class Foo {
+//    public:
+//     void Bar() { testing::StaticAssertTypeEq<int, T>(); }
+//   };
+//
+// the code:
+//
+//   void Test1() { Foo<bool> foo; }
+//
+// will NOT generate a compiler error, as Foo<bool>::Bar() is never
+// actually instantiated.  Instead, you need:
+//
+//   void Test2() { Foo<bool> foo; foo.Bar(); }
+//
+// to cause a compiler error.
+template <typename T1, typename T2>
+bool StaticAssertTypeEq() {
+  (void)internal::StaticAssertTypeEqHelper<T1, T2>();
+  return true;
+}
+
+// Defines a test.
+//
+// The first parameter is the name of the test case, and the second
+// parameter is the name of the test within the test case.
+//
+// The convention is to end the test case name with "Test".  For
+// example, a test case for the Foo class can be named FooTest.
+//
+// Test code should appear between braces after an invocation of
+// this macro.  Example:
+//
+//   TEST(FooTest, InitializesCorrectly) {
+//     Foo foo;
+//     EXPECT_TRUE(foo.StatusIsOK());
+//   }
+
+// Note that we call GetTestTypeId() instead of GetTypeId<
+// ::testing::Test>() here to get the type ID of testing::Test.  This
+// is to work around a suspected linker bug when using Google Test as
+// a framework on Mac OS X.  The bug causes GetTypeId<
+// ::testing::Test>() to return different values depending on whether
+// the call is from the Google Test framework itself or from user test
+// code.  GetTestTypeId() is guaranteed to always return the same
+// value, as it always calls GetTypeId<>() from the Google Test
+// framework.
+#define GTEST_TEST(test_case_name, test_name)\
+  GTEST_TEST_(test_case_name, test_name, \
+              ::testing::Test, ::testing::internal::GetTestTypeId())
+
+// Define this macro to 1 to omit the definition of TEST(), which
+// is a generic name and clashes with some other libraries.
+#if !GTEST_DONT_DEFINE_TEST
+# define TEST(test_case_name, test_name) GTEST_TEST(test_case_name, test_name)
+#endif
+
+// Defines a test that uses a test fixture.
+//
+// The first parameter is the name of the test fixture class, which
+// also doubles as the test case name.  The second parameter is the
+// name of the test within the test case.
+//
+// A test fixture class must be declared earlier.  The user should put
+// his test code between braces after using this macro.  Example:
+//
+//   class FooTest : public testing::Test {
+//    protected:
+//     virtual void SetUp() { b_.AddElement(3); }
+//
+//     Foo a_;
+//     Foo b_;
+//   };
+//
+//   TEST_F(FooTest, InitializesCorrectly) {
+//     EXPECT_TRUE(a_.StatusIsOK());
+//   }
+//
+//   TEST_F(FooTest, ReturnsElementCountCorrectly) {
+//     EXPECT_EQ(0, a_.size());
+//     EXPECT_EQ(1, b_.size());
+//   }
+
+#define TEST_F(test_fixture, test_name)\
+  GTEST_TEST_(test_fixture, test_name, test_fixture, \
+              ::testing::internal::GetTypeId<test_fixture>())
+
+}  // namespace testing
+
+// Use this function in main() to run all tests.  It returns 0 if all
+// tests are successful, or 1 otherwise.
+//
+// RUN_ALL_TESTS() should be invoked after the command line has been
+// parsed by InitGoogleTest().
+//
+// This function was formerly a macro; thus, it is in the global
+// namespace and has an all-caps name.
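+//
+// For example (illustrative; linking against gtest_main provides an
+// equivalent main() automatically):
+//
+//   int main(int argc, char** argv) {
+//     ::testing::InitGoogleTest(&argc, argv);
+//     return RUN_ALL_TESTS();
+//   }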
+int RUN_ALL_TESTS() GTEST_MUST_USE_RESULT_;
+
+inline int RUN_ALL_TESTS() {
+  return ::testing::UnitTest::GetInstance()->Run();
+}
+
+#endif  // GTEST_INCLUDE_GTEST_GTEST_H_
diff --git a/internal/ceres/householder_vector.h b/internal/ceres/householder_vector.h
new file mode 100644
index 0000000..6d85217
--- /dev/null
+++ b/internal/ceres/householder_vector.h
@@ -0,0 +1,85 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vitus@google.com (Michael Vitus)
+
+#ifndef CERES_PUBLIC_HOUSEHOLDER_VECTOR_H_
+#define CERES_PUBLIC_HOUSEHOLDER_VECTOR_H_
+
+#include "Eigen/Core"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+// Algorithm 5.1.1 from 'Matrix Computations' by Golub et al. (Johns Hopkins
+// Studies in Mathematical Sciences) but using the nth element of the input
+// vector as the pivot instead of the first. This computes the vector v with
+// v(n) = 1 and beta such that H = I - beta * v * v^T is orthogonal and
+// H * x = ||x||_2 * e_n.
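+//
+// As a concrete check (illustrative numbers, not part of the implementation):
+// for x = (3, 4)', sigma = 9, mu = 5 and v_pivot = -9 / (4 + 5) = -1, so
+// v = (-3, 1)' and beta = 2 * 1 / (9 + 1) = 0.2; then
+// H * x = (I - 0.2 * v * v^T) * x = (0, 5)' = ||x||_2 * e_n.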
+template <typename Scalar>
+void ComputeHouseholderVector(const Eigen::Matrix<Scalar, Eigen::Dynamic, 1>& x,
+                              Eigen::Matrix<Scalar, Eigen::Dynamic, 1>* v,
+                              Scalar* beta) {
+  CHECK(beta != nullptr);
+  CHECK(v != nullptr);
+  CHECK_GT(x.rows(), 1);
+  CHECK_EQ(x.rows(), v->rows());
+
+  Scalar sigma = x.head(x.rows() - 1).squaredNorm();
+  *v = x;
+  (*v)(v->rows() - 1) = Scalar(1.0);
+
+  *beta = Scalar(0.0);
+  const Scalar& x_pivot = x(x.rows() - 1);
+
+  if (sigma <= Scalar(std::numeric_limits<double>::epsilon())) {
+    if (x_pivot < Scalar(0.0)) {
+      *beta = Scalar(2.0);
+    }
+    return;
+  }
+
+  const Scalar mu = sqrt(x_pivot * x_pivot + sigma);
+  Scalar v_pivot = Scalar(1.0);
+
+  if (x_pivot <= Scalar(0.0)) {
+    v_pivot = x_pivot - mu;
+  } else {
+    v_pivot = -sigma / (x_pivot + mu);
+  }
+
+  *beta = Scalar(2.0) * v_pivot * v_pivot / (sigma + v_pivot * v_pivot);
+
+  v->head(v->rows() - 1) /= v_pivot;
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_PUBLIC_HOUSEHOLDER_VECTOR_H_
diff --git a/internal/ceres/householder_vector_test.cc b/internal/ceres/householder_vector_test.cc
new file mode 100644
index 0000000..fca0360
--- /dev/null
+++ b/internal/ceres/householder_vector_test.cc
@@ -0,0 +1,115 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vitus@google.com (Michael Vitus)
+
+#include "ceres/householder_vector.h"
+#include "ceres/internal/eigen.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+void HouseholderTestHelper(const Vector& x) {
+  const double kTolerance = 1e-14;
+
+  // Check to ensure that H * x = ||x|| * [0 ... 0 1]'.
+  Vector v(x.rows());
+  double beta;
+  ComputeHouseholderVector(x, &v, &beta);
+  Vector result = x - beta * v * (v.transpose() * x);
+
+  Vector expected_result(x.rows());
+  expected_result.setZero();
+  expected_result(x.rows() - 1) = 1;
+  expected_result *= x.norm();
+
+  for (int i = 0; i < x.rows(); ++i) {
+    EXPECT_NEAR(expected_result[i], result[i], kTolerance);
+  }
+}
+
+TEST(HouseholderVector, ZeroPositive) {
+  Vector x(3);
+  x << 0.0, 0.0, 0.25;
+
+  HouseholderTestHelper(x);
+}
+
+TEST(HouseholderVector, ZeroNegative) {
+  Vector x(3);
+  x << 0.0, 0.0, -0.25;
+
+  HouseholderTestHelper(x);
+}
+
+TEST(HouseholderVector, NearZeroPositive) {
+  Vector x(3);
+  x << 1e-18, 1e-18, 0.25;
+
+  HouseholderTestHelper(x);
+}
+
+TEST(HouseholderVector, NearZeroNegative) {
+  Vector x(3);
+  x << 1e-18, 1e-18, -0.25;
+
+  HouseholderTestHelper(x);
+}
+
+TEST(HouseholderVector, NonZeroNegative) {
+  Vector x(3);
+  x << 1.0, 0.0, -3.0;
+
+  HouseholderTestHelper(x);
+}
+
+TEST(HouseholderVector, NonZeroPositive) {
+  Vector x(3);
+  x << 1.0, 1.0, 1.0;
+
+  HouseholderTestHelper(x);
+}
+
+TEST(HouseholderVector, NonZeroPositive_Size4) {
+  Vector x(4);
+  x << 1.0, 1.0, 0.0, 2.0;
+
+  HouseholderTestHelper(x);
+}
+
+TEST(HouseholderVector, LastElementZero) {
+  Vector x(4);
+  x << 1.0, 1.0, 0.0, 0.0;
+
+  HouseholderTestHelper(x);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/implicit_schur_complement.cc b/internal/ceres/implicit_schur_complement.cc
new file mode 100644
index 0000000..bf680d1
--- /dev/null
+++ b/internal/ceres/implicit_schur_complement.cc
@@ -0,0 +1,224 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/implicit_schur_complement.h"
+
+#include "Eigen/Dense"
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/block_structure.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/linear_solver.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+ImplicitSchurComplement::ImplicitSchurComplement(
+    const LinearSolver::Options& options)
+    : options_(options),
+      D_(NULL),
+      b_(NULL) {
+}
+
+ImplicitSchurComplement::~ImplicitSchurComplement() {
+}
+
+void ImplicitSchurComplement::Init(const BlockSparseMatrix& A,
+                                   const double* D,
+                                   const double* b) {
+  // Since initialization is reasonably heavy, we avoid constructing a new
+  // PartitionedMatrixView every time by reusing the existing one.
+  if (A_ == NULL) {
+    A_.reset(PartitionedMatrixViewBase::Create(options_, A));
+  }
+
+  D_ = D;
+  b_ = b;
+
+  // Initialize temporary storage and compute the block diagonals of
+  // E'E and F'E.
+  if (block_diagonal_EtE_inverse_ == NULL) {
+    block_diagonal_EtE_inverse_.reset(A_->CreateBlockDiagonalEtE());
+    if (options_.preconditioner_type == JACOBI) {
+      block_diagonal_FtF_inverse_.reset(A_->CreateBlockDiagonalFtF());
+    }
+    rhs_.resize(A_->num_cols_f());
+    rhs_.setZero();
+    tmp_rows_.resize(A_->num_rows());
+    tmp_e_cols_.resize(A_->num_cols_e());
+    tmp_e_cols_2_.resize(A_->num_cols_e());
+    tmp_f_cols_.resize(A_->num_cols_f());
+  } else {
+    A_->UpdateBlockDiagonalEtE(block_diagonal_EtE_inverse_.get());
+    if (options_.preconditioner_type == JACOBI) {
+      A_->UpdateBlockDiagonalFtF(block_diagonal_FtF_inverse_.get());
+    }
+  }
+
+  // The block diagonals of the augmented linear system contain
+  // contributions from the diagonal D if it is non-null. Add that to
+  // the block diagonals and invert them.
+  AddDiagonalAndInvert(D_, block_diagonal_EtE_inverse_.get());
+  if (options_.preconditioner_type == JACOBI) {
+    AddDiagonalAndInvert((D_ ==  NULL) ? NULL : D_ + A_->num_cols_e(),
+                         block_diagonal_FtF_inverse_.get());
+  }
+
+  // Compute the RHS of the Schur complement system.
+  UpdateRhs();
+}
+
+// Evaluate the product
+//
+//   Sx = [F'F - F'E (E'E)^-1 E'F] x
+//
+// by breaking it down into individual matrix-vector products
+// involving the matrices E and F. This is implemented using a
+// PartitionedMatrixView of the input matrix A.
+void ImplicitSchurComplement::RightMultiply(const double* x, double* y) const {
+  // y1 = F x
+  tmp_rows_.setZero();
+  A_->RightMultiplyF(x, tmp_rows_.data());
+
+  // y2 = E' y1
+  tmp_e_cols_.setZero();
+  A_->LeftMultiplyE(tmp_rows_.data(), tmp_e_cols_.data());
+
+  // y3 = -(E'E)^-1 y2
+  tmp_e_cols_2_.setZero();
+  block_diagonal_EtE_inverse_->RightMultiply(tmp_e_cols_.data(),
+                                             tmp_e_cols_2_.data());
+  tmp_e_cols_2_ *= -1.0;
+
+  // y1 = y1 + E y3
+  A_->RightMultiplyE(tmp_e_cols_2_.data(), tmp_rows_.data());
+
+  // y5 = D * x
+  if (D_ != NULL) {
+    ConstVectorRef Dref(D_ + A_->num_cols_e(), num_cols());
+    VectorRef(y, num_cols()) =
+        (Dref.array().square() *
+         ConstVectorRef(x, num_cols()).array()).matrix();
+  } else {
+    VectorRef(y, num_cols()).setZero();
+  }
+
+  // y = y5 + F' y1
+  A_->LeftMultiplyF(tmp_rows_.data(), y);
+}
+
+// Given a block diagonal matrix and an optional array of diagonal
+// entries D, add the squares of the entries of D to the diagonal of
+// the matrix and compute the inverse of each diagonal block.
+void ImplicitSchurComplement::AddDiagonalAndInvert(
+    const double* D,
+    BlockSparseMatrix* block_diagonal) {
+  const CompressedRowBlockStructure* block_diagonal_structure =
+      block_diagonal->block_structure();
+  for (int r = 0; r < block_diagonal_structure->rows.size(); ++r) {
+    const int row_block_pos = block_diagonal_structure->rows[r].block.position;
+    const int row_block_size = block_diagonal_structure->rows[r].block.size;
+    const Cell& cell = block_diagonal_structure->rows[r].cells[0];
+    MatrixRef m(block_diagonal->mutable_values() + cell.position,
+                row_block_size, row_block_size);
+
+    if (D != NULL) {
+      ConstVectorRef d(D + row_block_pos, row_block_size);
+      m += d.array().square().matrix().asDiagonal();
+    }
+
+    m = m
+        .selfadjointView<Eigen::Upper>()
+        .llt()
+        .solve(Matrix::Identity(row_block_size, row_block_size));
+  }
+}
+
+// Similar to RightMultiply, use the block structure of the matrix A
+// to compute y = (E'E)^-1 (E'b - E'F x).
+void ImplicitSchurComplement::BackSubstitute(const double* x, double* y) {
+  const int num_cols_e = A_->num_cols_e();
+  const int num_cols_f = A_->num_cols_f();
+  const int num_cols =  A_->num_cols();
+  const int num_rows = A_->num_rows();
+
+  // y1 = F x
+  tmp_rows_.setZero();
+  A_->RightMultiplyF(x, tmp_rows_.data());
+
+  // y2 = b - y1
+  tmp_rows_ = ConstVectorRef(b_, num_rows) - tmp_rows_;
+
+  // y3 = E' y2
+  tmp_e_cols_.setZero();
+  A_->LeftMultiplyE(tmp_rows_.data(), tmp_e_cols_.data());
+
+  // y = (E'E)^-1 y3
+  VectorRef(y, num_cols).setZero();
+  block_diagonal_EtE_inverse_->RightMultiply(tmp_e_cols_.data(), y);
+
+  // The full solution vector y has two blocks. The first block of
+  // variables corresponds to the eliminated variables, which we just
+  // computed via back substitution. The second block of variables
+  // corresponds to the Schur complement system, so we just copy those
+  // values from the solution to the Schur complement.
+  VectorRef(y + num_cols_e, num_cols_f) =  ConstVectorRef(x, num_cols_f);
+}
+
+// Compute the RHS of the Schur complement system.
+//
+// rhs = F'b - F'E (E'E)^-1 E'b
+//
+// Like BackSubstitute, we use the block structure of A to implement
+// this using a series of matrix vector products.
+void ImplicitSchurComplement::UpdateRhs() {
+  // y1 = E'b
+  tmp_e_cols_.setZero();
+  A_->LeftMultiplyE(b_, tmp_e_cols_.data());
+
+  // y2 = (E'E)^-1 y1
+  Vector y2 = Vector::Zero(A_->num_cols_e());
+  block_diagonal_EtE_inverse_->RightMultiply(tmp_e_cols_.data(), y2.data());
+
+  // y3 = E y2
+  tmp_rows_.setZero();
+  A_->RightMultiplyE(y2.data(), tmp_rows_.data());
+
+  // y3 = b - y3
+  tmp_rows_ = ConstVectorRef(b_, A_->num_rows()) - tmp_rows_;
+
+  // rhs = F' y3
+  rhs_.setZero();
+  A_->LeftMultiplyF(tmp_rows_.data(), rhs_.data());
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/implicit_schur_complement.h b/internal/ceres/implicit_schur_complement.h
new file mode 100644
index 0000000..1fac72c
--- /dev/null
+++ b/internal/ceres/implicit_schur_complement.h
@@ -0,0 +1,167 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// An iterative solver for solving the Schur complement/reduced camera
+// linear system that arises in SfM problems.
+
+#ifndef CERES_INTERNAL_IMPLICIT_SCHUR_COMPLEMENT_H_
+#define CERES_INTERNAL_IMPLICIT_SCHUR_COMPLEMENT_H_
+
+#include <memory>
+#include "ceres/linear_operator.h"
+#include "ceres/linear_solver.h"
+#include "ceres/partitioned_matrix_view.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+class BlockSparseMatrix;
+
+// This class implements various linear algebraic operations related
+// to the Schur complement without explicitly forming it.
+//
+//
+// Given a rectangular linear system Ax = b, where
+//
+//   A = [E F]
+//
+// The normal equations are given by
+//
+//   A'Ax = A'b
+//
+//  |E'E E'F||y| = |E'b|
+//  |F'E F'F||z|   |F'b|
+//
+// and the Schur complement system is given by
+//
+//  [F'F - F'E (E'E)^-1 E'F] z = F'b - F'E (E'E)^-1 E'b
+//
+// Now if we wish to solve Ax = b in the least squares sense, one way
+// is to form this Schur complement system and solve it using
+// Preconditioned Conjugate Gradients.
+//
+// The key operation in a conjugate gradient solver is the evaluation of the
+// matrix vector product with the Schur complement
+//
+//   S = F'F - F'E (E'E)^-1 E'F
+//
+// It is straightforward to see that matrix vector products with S can
+// be evaluated without storing S in memory. Instead, given (E'E)^-1
+// (which for our purposes is an easily inverted block diagonal
+// matrix), it can be done in terms of matrix vector products with E,
+// F and (E'E)^-1. This class implements this functionality and other
+// auxiliary bits needed to implement a CG solver on the Schur
+// complement using the PartitionedMatrixView object.
+//
+// THREAD SAFETY: This class is not thread safe. In particular, the
+// RightMultiply (and the LeftMultiply) methods are not thread safe as
+// they depend on mutable arrays used for the temporaries needed to
+// compute the product y += Sx.
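+//
+// A typical usage sketch (illustrative only; error handling and the
+// conjugate gradients driver are omitted):
+//
+//   ImplicitSchurComplement isc(options);
+//   isc.Init(A, D, b);
+//   // Solve isc * z = isc.rhs() with CG, using isc.RightMultiply() as the
+//   // matrix-vector product, then recover the eliminated variables with
+//   // isc.BackSubstitute(z.data(), y.data()).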
+class ImplicitSchurComplement : public LinearOperator {
+ public:
+  // num_eliminate_blocks is the number of E blocks in the matrix
+  // A.
+  //
+  // preconditioner indicates whether the inverse of the matrix F'F
+  // should be computed or not as a preconditioner for the Schur
+  // Complement.
+  //
+  // TODO(sameeragarwal): Get rid of the two bools below and replace
+  // them with enums.
+  explicit ImplicitSchurComplement(const LinearSolver::Options& options);
+  virtual ~ImplicitSchurComplement();
+
+  // Initialize the Schur complement for a linear least squares
+  // problem of the form
+  //
+  //   |A      | x = |b|
+  //   |diag(D)|     |0|
+  //
+  // If D is null, then it is treated as a zero dimensional matrix. It
+  // is important that the matrix A has a BlockStructure object
+  // associated with it, with a block structure that is compatible
+  // with the SchurComplement solver.
+  void Init(const BlockSparseMatrix& A, const double* D, const double* b);
+
+  // y += Sx, where S is the Schur complement.
+  virtual void RightMultiply(const double* x, double* y) const;
+
+  // The Schur complement is a symmetric positive definite matrix,
+  // thus the left and right multiply operators are the same.
+  virtual void LeftMultiply(const double* x, double* y) const {
+    RightMultiply(x, y);
+  }
+
+  // y = (E'E)^-1 (E'b - E'F x). Given an estimate of the solution to
+  // the Schur complement system, this method computes the value of
+  // the e_block variables that were eliminated to form the Schur
+  // complement.
+  void BackSubstitute(const double* x, double* y);
+
+  virtual int num_rows() const { return A_->num_cols_f(); }
+  virtual int num_cols() const { return A_->num_cols_f(); }
+  const Vector& rhs()    const { return rhs_;             }
+
+  const BlockSparseMatrix* block_diagonal_EtE_inverse() const {
+    return block_diagonal_EtE_inverse_.get();
+  }
+
+  const BlockSparseMatrix* block_diagonal_FtF_inverse() const {
+    return block_diagonal_FtF_inverse_.get();
+  }
+
+ private:
+  void AddDiagonalAndInvert(const double* D, BlockSparseMatrix* matrix);
+  void UpdateRhs();
+
+  const LinearSolver::Options& options_;
+
+  std::unique_ptr<PartitionedMatrixViewBase> A_;
+  const double* D_;
+  const double* b_;
+
+  std::unique_ptr<BlockSparseMatrix> block_diagonal_EtE_inverse_;
+  std::unique_ptr<BlockSparseMatrix> block_diagonal_FtF_inverse_;
+
+  Vector rhs_;
+
+  // Temporary storage vectors used to implement RightMultiply.
+  mutable Vector tmp_rows_;
+  mutable Vector tmp_e_cols_;
+  mutable Vector tmp_e_cols_2_;
+  mutable Vector tmp_f_cols_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_IMPLICIT_SCHUR_COMPLEMENT_H_
diff --git a/internal/ceres/implicit_schur_complement_test.cc b/internal/ceres/implicit_schur_complement_test.cc
new file mode 100644
index 0000000..3beb386
--- /dev/null
+++ b/internal/ceres/implicit_schur_complement_test.cc
@@ -0,0 +1,205 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/implicit_schur_complement.h"
+
+#include <cstddef>
+#include <memory>
+#include "Eigen/Dense"
+#include "ceres/block_random_access_dense_matrix.h"
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/casts.h"
+#include "ceres/context_impl.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/linear_least_squares_problems.h"
+#include "ceres/linear_solver.h"
+#include "ceres/schur_eliminator.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+using testing::AssertionResult;
+
+const double kEpsilon = 1e-14;
+
+class ImplicitSchurComplementTest : public ::testing::Test {
+ protected:
+  virtual void SetUp() {
+    std::unique_ptr<LinearLeastSquaresProblem> problem(
+        CreateLinearLeastSquaresProblemFromId(2));
+
+    CHECK(problem != nullptr);
+    A_.reset(down_cast<BlockSparseMatrix*>(problem->A.release()));
+    b_.reset(problem->b.release());
+    D_.reset(problem->D.release());
+
+    num_cols_ = A_->num_cols();
+    num_rows_ = A_->num_rows();
+    num_eliminate_blocks_ = problem->num_eliminate_blocks;
+  }
+
+  void ReducedLinearSystemAndSolution(double* D,
+                                      Matrix* lhs,
+                                      Vector* rhs,
+                                      Vector* solution) {
+    const CompressedRowBlockStructure* bs = A_->block_structure();
+    const int num_col_blocks = bs->cols.size();
+    std::vector<int> blocks(num_col_blocks - num_eliminate_blocks_, 0);
+    for (int i = num_eliminate_blocks_; i < num_col_blocks; ++i) {
+      blocks[i - num_eliminate_blocks_] = bs->cols[i].size;
+    }
+
+    BlockRandomAccessDenseMatrix blhs(blocks);
+    const int num_schur_rows = blhs.num_rows();
+
+    LinearSolver::Options options;
+    options.elimination_groups.push_back(num_eliminate_blocks_);
+    options.type = DENSE_SCHUR;
+    ContextImpl context;
+    options.context = &context;
+
+    std::unique_ptr<SchurEliminatorBase> eliminator(
+        SchurEliminatorBase::Create(options));
+    CHECK(eliminator != nullptr);
+    const bool kFullRankETE = true;
+    eliminator->Init(num_eliminate_blocks_, kFullRankETE, bs);
+
+    lhs->resize(num_schur_rows, num_schur_rows);
+    rhs->resize(num_schur_rows);
+
+    eliminator->Eliminate(A_.get(), b_.get(), D, &blhs, rhs->data());
+
+    MatrixRef lhs_ref(blhs.mutable_values(), num_schur_rows, num_schur_rows);
+
+    // lhs_ref is an upper triangular matrix. Construct a full version
+    // of lhs_ref in lhs by copying lhs_ref and then filling in its
+    // strictly lower triangular part with the transpose of the strictly
+    // upper triangular part of lhs_ref.
+    *lhs = lhs_ref;
+    lhs->triangularView<Eigen::StrictlyLower>() =
+        lhs_ref.triangularView<Eigen::StrictlyUpper>().transpose();
+
+    solution->resize(num_cols_);
+    solution->setZero();
+    VectorRef schur_solution(solution->data() + num_cols_ - num_schur_rows,
+                             num_schur_rows);
+    schur_solution = lhs->selfadjointView<Eigen::Upper>().llt().solve(*rhs);
+    eliminator->BackSubstitute(A_.get(), b_.get(), D,
+                               schur_solution.data(), solution->data());
+  }
+
+  AssertionResult TestImplicitSchurComplement(double* D) {
+    Matrix lhs;
+    Vector rhs;
+    Vector reference_solution;
+    ReducedLinearSystemAndSolution(D, &lhs, &rhs, &reference_solution);
+
+    LinearSolver::Options options;
+    options.elimination_groups.push_back(num_eliminate_blocks_);
+    options.preconditioner_type = JACOBI;
+    ContextImpl context;
+    options.context = &context;
+    ImplicitSchurComplement isc(options);
+    isc.Init(*A_, D, b_.get());
+
+    int num_sc_cols = lhs.cols();
+
+    for (int i = 0; i < num_sc_cols; ++i) {
+      Vector x(num_sc_cols);
+      x.setZero();
+      x(i) = 1.0;
+
+      Vector y(num_sc_cols);
+      y = lhs * x;
+
+      Vector z(num_sc_cols);
+      isc.RightMultiply(x.data(), z.data());
+
+      // The i^th column of the implicit Schur complement should match
+      // the i^th column of the explicit Schur complement.
+      if ((y - z).norm() > kEpsilon) {
+        return testing::AssertionFailure()
+            << "Explicit and Implicit SchurComplements differ in "
+            << "column " << i << ". explicit: " << y.transpose()
+            << " implicit: " << z.transpose();
+      }
+    }
+
+    // Compare the rhs of the reduced linear system
+    if ((isc.rhs() - rhs).norm() > kEpsilon) {
+      return testing::AssertionFailure()
+            << "Explicit and Implicit SchurComplements differ in "
+            << "rhs. explicit: " << rhs.transpose()
+            << " implicit: " << isc.rhs().transpose();
+    }
+
+    // Reference solution to the f_block.
+    const Vector reference_f_sol =
+        lhs.selfadjointView<Eigen::Upper>().llt().solve(rhs);
+
+    // Backsubstituted solution from the implicit schur solver using the
+    // reference solution to the f_block.
+    Vector sol(num_cols_);
+    isc.BackSubstitute(reference_f_sol.data(), sol.data());
+    if ((sol - reference_solution).norm() > kEpsilon) {
+      return testing::AssertionFailure()
+          << "Explicit and Implicit SchurComplements solutions differ. "
+          << "explicit: " << reference_solution.transpose()
+          << " implicit: " << sol.transpose();
+    }
+
+    return testing::AssertionSuccess();
+  }
+
+  int num_rows_;
+  int num_cols_;
+  int num_eliminate_blocks_;
+
+  std::unique_ptr<BlockSparseMatrix> A_;
+  std::unique_ptr<double[]> b_;
+  std::unique_ptr<double[]> D_;
+};
+
+// Verify that the Schur Complement matrix implied by the
+// ImplicitSchurComplement class matches the one explicitly computed
+// by the SchurComplement solver.
+//
+// We do this with and without regularization to check that the
+// support for the LM diagonal is correct.
+TEST_F(ImplicitSchurComplementTest, SchurMatrixValuesTest) {
+  EXPECT_TRUE(TestImplicitSchurComplement(NULL));
+  EXPECT_TRUE(TestImplicitSchurComplement(D_.get()));
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/inner_product_computer.cc b/internal/ceres/inner_product_computer.cc
new file mode 100644
index 0000000..2bf8836
--- /dev/null
+++ b/internal/ceres/inner_product_computer.cc
@@ -0,0 +1,330 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/inner_product_computer.h"
+
+#include <algorithm>
+#include "ceres/small_blas.h"
+
+namespace ceres {
+namespace internal {
+
+
+// Create the CompressedRowSparseMatrix matrix that will contain the
+// inner product.
+//
+// storage_type controls whether the result matrix contains the upper
+// or the lower triangular part of the product.
+//
+// num_nonzeros is the number of non-zeros in the result matrix.
+CompressedRowSparseMatrix* InnerProductComputer::CreateResultMatrix(
+    const CompressedRowSparseMatrix::StorageType storage_type,
+    const int num_nonzeros) {
+  CompressedRowSparseMatrix* matrix =
+      new CompressedRowSparseMatrix(m_.num_cols(), m_.num_cols(), num_nonzeros);
+  matrix->set_storage_type(storage_type);
+
+  const CompressedRowBlockStructure* bs = m_.block_structure();
+  const std::vector<Block>& blocks = bs->cols;
+  matrix->mutable_row_blocks()->resize(blocks.size());
+  matrix->mutable_col_blocks()->resize(blocks.size());
+  for (int i = 0; i < blocks.size(); ++i) {
+    (*(matrix->mutable_row_blocks()))[i] = blocks[i].size;
+    (*(matrix->mutable_col_blocks()))[i] = blocks[i].size;
+  }
+
+  return matrix;
+}
+
+// Given the set of product terms in the inner product, return the
+// total number of non-zeros in the result and, for each row block of
+// the result matrix, compute the number of non-zeros in any one row
+// of the row block.
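+//
+// For example (illustrative): with all blocks of size 2 and sorted product
+// terms (0, 0), (0, 1), (0, 1), (1, 1), the duplicate (0, 1) is counted
+// once, giving row_nnz = {4, 2} and a total of 12 non-zeros.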
+int InnerProductComputer::ComputeNonzeros(
+    const std::vector<InnerProductComputer::ProductTerm>& product_terms,
+    std::vector<int>* row_nnz) {
+  const CompressedRowBlockStructure* bs = m_.block_structure();
+  const std::vector<Block>& blocks = bs->cols;
+
+  row_nnz->resize(blocks.size());
+  std::fill(row_nnz->begin(), row_nnz->end(), 0);
+
+  // First product term.
+  (*row_nnz)[product_terms[0].row] = blocks[product_terms[0].col].size;
+  int num_nonzeros =
+      blocks[product_terms[0].row].size * blocks[product_terms[0].col].size;
+
+  // Remaining product terms.
+  for (int i = 1; i < product_terms.size(); ++i) {
+    const ProductTerm& previous = product_terms[i - 1];
+    const ProductTerm& current = product_terms[i];
+
+    // Each (row, col) block counts only once. This check relies on
+    // the product terms being sorted by (row, col).
+    if (current.row != previous.row || current.col != previous.col) {
+      (*row_nnz)[current.row] += blocks[current.col].size;
+      num_nonzeros += blocks[current.row].size * blocks[current.col].size;
+    }
+  }
+
+  return num_nonzeros;
+}
+
+InnerProductComputer::InnerProductComputer(const BlockSparseMatrix& m,
+                                           const int start_row_block,
+                                           const int end_row_block)
+    : m_(m), start_row_block_(start_row_block), end_row_block_(end_row_block) {}
+
+// Compute the sparsity structure of the product m.transpose() * m
+// and create a CompressedRowSparseMatrix corresponding to it.
+//
+// Also compute the "program" vector, which for every term in the
+// block outer product provides the information for the entry in the
+// values array of the result matrix where it should be accumulated.
+//
+// Since the entries of the program are the same for rows with the
+// same sparsity structure, the program only stores the result for one
+// row per row block. The Compute function reuses this information for
+// each row in the row block.
+//
+// product_storage_type controls the form of the output matrix. It
+// can be LOWER_TRIANGULAR or UPPER_TRIANGULAR.
+InnerProductComputer* InnerProductComputer::Create(
+    const BlockSparseMatrix& m,
+    CompressedRowSparseMatrix::StorageType product_storage_type) {
+  return InnerProductComputer::Create(
+      m, 0, m.block_structure()->rows.size(), product_storage_type);
+}
+
+InnerProductComputer* InnerProductComputer::Create(
+    const BlockSparseMatrix& m,
+    const int start_row_block,
+    const int end_row_block,
+    CompressedRowSparseMatrix::StorageType product_storage_type) {
+  CHECK(product_storage_type == CompressedRowSparseMatrix::LOWER_TRIANGULAR ||
+        product_storage_type == CompressedRowSparseMatrix::UPPER_TRIANGULAR);
+  CHECK_GT(m.num_nonzeros(), 0)
+      << "Congratulations, you found a bug in Ceres. Please report it.";
+  InnerProductComputer* inner_product_computer =
+      new InnerProductComputer(m, start_row_block, end_row_block);
+  inner_product_computer->Init(product_storage_type);
+  return inner_product_computer;
+}
+
+void InnerProductComputer::Init(
+    const CompressedRowSparseMatrix::StorageType product_storage_type) {
+  std::vector<InnerProductComputer::ProductTerm> product_terms;
+  const CompressedRowBlockStructure* bs = m_.block_structure();
+
+  // Given the input matrix m in block sparse format, with blocks indexed
+  // by (row_block, col_block), represent each block multiplication
+  //     (row_block, col_block1)' X (row_block, col_block2)
+  // by its product term
+  //     (col_block1, col_block2, index).
+  for (int row_block = start_row_block_; row_block < end_row_block_;
+       ++row_block) {
+    const CompressedRow& row = bs->rows[row_block];
+    for (int c1 = 0; c1 < row.cells.size(); ++c1) {
+      const Cell& cell1 = row.cells[c1];
+      int c2_begin, c2_end;
+      if (product_storage_type == CompressedRowSparseMatrix::LOWER_TRIANGULAR) {
+        c2_begin = 0;
+        c2_end = c1 + 1;
+      } else {
+        c2_begin = c1;
+        c2_end = row.cells.size();
+      }
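+      // The range [c2_begin, c2_end) pairs cell1 only with the cells at
+      // or before it in this row for LOWER_TRIANGULAR output, and only
+      // with the cells at or after it for UPPER_TRIANGULAR output.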
+
+      for (int c2 = c2_begin; c2 < c2_end; ++c2) {
+        const Cell& cell2 = row.cells[c2];
+        product_terms.push_back(InnerProductComputer::ProductTerm(
+            cell1.block_id, cell2.block_id, product_terms.size()));
+      }
+    }
+  }
+
+  std::sort(product_terms.begin(), product_terms.end());
+  ComputeOffsetsAndCreateResultMatrix(product_storage_type, product_terms);
+}
+
+void InnerProductComputer::ComputeOffsetsAndCreateResultMatrix(
+    const CompressedRowSparseMatrix::StorageType product_storage_type,
+    const std::vector<InnerProductComputer::ProductTerm>& product_terms) {
+  const std::vector<Block>& col_blocks = m_.block_structure()->cols;
+
+  std::vector<int> row_block_nnz;
+  const int num_nonzeros = ComputeNonzeros(product_terms, &row_block_nnz);
+
+  result_.reset(CreateResultMatrix(product_storage_type, num_nonzeros));
+
+  // Populate the row non-zero counts in the result matrix.
+  int* crsm_rows = result_->mutable_rows();
+  crsm_rows[0] = 0;
+  for (int i = 0; i < col_blocks.size(); ++i) {
+    for (int j = 0; j < col_blocks[i].size; ++j, ++crsm_rows) {
+      *(crsm_rows + 1) = *crsm_rows + row_block_nnz[i];
+    }
+  }
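+  // For example (hypothetical numbers), with two column blocks of sizes
+  // 2 and 3 and row_block_nnz = {5, 3}, the rows array becomes
+  // {0, 5, 10, 13, 16, 19}: every scalar row within a row block of the
+  // result has the same number of nonzeros.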
+
+  // The following macro FILL_CRSM_COL_BLOCK is key to understanding
+  // how this class works.
+  //
+  // It does two things.
+  //
+  // Sets the value for the current term in the result_offsets_ array
+  // and populates the cols array of the result matrix.
+  //
+  // row_block and col_block, as the names imply, refer to the row and
+  // column blocks of the current term.
+  //
+  // nnz is the number of nonzeros in the result matrix at the
+  // beginning of the first row of row_block.
+  //
+  // col_nnz is the number of nonzeros in the first row of the row
+  // block that occur before the current column block, i.e., it is the
+  // sum of the sizes of all the column blocks in this row block that
+  // came before this column block.
+  //
+  // Given these two numbers and the total number of nonzeros in this
+  // row (nnz_in_row), we can now populate the cols array as follows:
+  //
+  // nnz + j * nnz_in_row is the beginning of the j^th row.
+  //
+  // nnz + j * nnz_in_row + col_nnz is the beginning of the column
+  // block in the j^th row.
+  //
+  // nnz + j * nnz_in_row + col_nnz + k is then the index of the entry
+  // in the j^th row and k^th column of the product block, and the
+  // corresponding entry in the cols array is
+  //
+  // col_blocks[col_block].position + k, which is the column number of
+  // the k^th column of the current column block.
+#define FILL_CRSM_COL_BLOCK                                \
+  const int row_block = current->row;                      \
+  const int col_block = current->col;                      \
+  const int nnz_in_row = row_block_nnz[row_block];         \
+  int* crsm_cols = result_->mutable_cols();                \
+  result_offsets_[current->index] = nnz + col_nnz;         \
+  for (int j = 0; j < col_blocks[row_block].size; ++j) {   \
+    for (int k = 0; k < col_blocks[col_block].size; ++k) { \
+      crsm_cols[nnz + j * nnz_in_row + col_nnz + k] =      \
+          col_blocks[col_block].position + k;              \
+    }                                                      \
+  }
+
+  result_offsets_.resize(product_terms.size());
+  int col_nnz = 0;
+  int nnz = 0;
+
+  // Process the first term.
+  const InnerProductComputer::ProductTerm* current = &product_terms[0];
+  FILL_CRSM_COL_BLOCK;
+
+  // Process the rest of the terms.
+  for (int i = 1; i < product_terms.size(); ++i) {
+    current = &product_terms[i];
+    const InnerProductComputer::ProductTerm* previous = &product_terms[i - 1];
+
+    // If the current term is the same as the previous term, then it
+    // stores its product at the same location as the previous term.
+    if (previous->row == current->row && previous->col == current->col) {
+      result_offsets_[current->index] = result_offsets_[previous->index];
+      continue;
+    }
+
+    if (previous->row == current->row) {
+      // If the current and previous terms are in the same row block,
+      // then they differ in the column block, in which case advance
+      // col_nnz by the column size of the previous term.
+      col_nnz += col_blocks[previous->col].size;
+    } else {
+      // If we have moved to a new row block, then col_nnz is zero,
+      // and nnz is set to the beginning of the row block.
+      col_nnz = 0;
+      nnz += row_block_nnz[previous->row] * col_blocks[previous->row].size;
+    }
+
+    FILL_CRSM_COL_BLOCK;
+  }
+}
+
+// Use the result_offsets_ array to numerically compute the product
+// m' * m and store it in result_.
+//
+// TODO(sameeragarwal): Multithreading support.
+void InnerProductComputer::Compute() {
+  const double* m_values = m_.values();
+  const CompressedRowBlockStructure* bs = m_.block_structure();
+
+  const CompressedRowSparseMatrix::StorageType storage_type =
+      result_->storage_type();
+  result_->SetZero();
+  double* values = result_->mutable_values();
+  const int* rows = result_->rows();
+  int cursor = 0;
+
+  // Iterate row blocks.
+  for (int r = start_row_block_; r < end_row_block_; ++r) {
+    const CompressedRow& m_row = bs->rows[r];
+    for (int c1 = 0; c1 < m_row.cells.size(); ++c1) {
+      const Cell& cell1 = m_row.cells[c1];
+      const int c1_size = bs->cols[cell1.block_id].size;
+      const int row_nnz = rows[bs->cols[cell1.block_id].position + 1] -
+          rows[bs->cols[cell1.block_id].position];
+
+      int c2_begin, c2_end;
+      if (storage_type == CompressedRowSparseMatrix::LOWER_TRIANGULAR) {
+        c2_begin = 0;
+        c2_end = c1 + 1;
+      } else {
+        c2_begin = c1;
+        c2_end = m_row.cells.size();
+      }
+
+      for (int c2 = c2_begin; c2 < c2_end; ++c2, ++cursor) {
+        const Cell& cell2 = m_row.cells[c2];
+        const int c2_size = bs->cols[cell2.block_id].size;
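+        // Accumulate cell1' * cell2 into the result block whose values
+        // start at result_offsets_[cursor]; row_nnz is the stride (in
+        // doubles) between consecutive rows of that block in the values
+        // array.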
+        MatrixTransposeMatrixMultiply<Eigen::Dynamic, Eigen::Dynamic,
+                                      Eigen::Dynamic, Eigen::Dynamic, 1>(
+                                          m_values + cell1.position,
+                                          m_row.block.size, c1_size,
+                                          m_values + cell2.position,
+                                          m_row.block.size, c2_size,
+                                          values + result_offsets_[cursor],
+                                          0, 0, c1_size, row_nnz);
+      }
+    }
+  }
+
+  CHECK_EQ(cursor, result_offsets_.size());
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/inner_product_computer.h b/internal/ceres/inner_product_computer.h
new file mode 100644
index 0000000..73073f8
--- /dev/null
+++ b/internal/ceres/inner_product_computer.h
@@ -0,0 +1,157 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_INNER_PRODUCT_COMPUTER_H_
+#define CERES_INTERNAL_INNER_PRODUCT_COMPUTER_H_
+
+#include <memory>
+#include <vector>
+
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/compressed_row_sparse_matrix.h"
+
+namespace ceres {
+namespace internal {
+
+// This class is used to repeatedly compute the inner product
+//
+//   result = m' * m
+//
+// where the sparsity structure of m remains constant across calls.
+//
+// Upon creation, the class computes and caches the information needed
+// to compute the product, and then reuses it every time
+// InnerProductComputer::Compute is called.
+//
+// See sparse_normal_cholesky_solver.cc for example usage.
+//
+// Note that the result matrix is a block upper or lower triangular
+// matrix, i.e., it will contain entries in the upper or lower
+// triangular part of the matrix corresponding to the blocks that occur
+// along its diagonal.
+//
+// This is not a problem as sparse linear algebra libraries can easily
+// ignore these entries, and the space used is minimal (linear in the
+// size of the matrices).
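+//
+// A minimal usage sketch (illustrative only; "jacobian" and
+// "num_iterations" are hypothetical names, not part of this API):
+//
+//   std::unique_ptr<InnerProductComputer> inner_product_computer(
+//       InnerProductComputer::Create(
+//           jacobian, CompressedRowSparseMatrix::LOWER_TRIANGULAR));
+//   for (int i = 0; i < num_iterations; ++i) {
+//     // ... update the values of jacobian; its sparsity stays fixed ...
+//     inner_product_computer->Compute();
+//     const CompressedRowSparseMatrix& jtj = inner_product_computer->result();
+//     // ... use jtj, e.g. factorize it and solve ...
+//   }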
+class InnerProductComputer {
+ public:
+  // Factory
+  //
+  // m is the input matrix
+  //
+  // Since m' * m is a symmetric matrix, only half of it is computed;
+  // the value of storage_type, which must be UPPER_TRIANGULAR or
+  // LOWER_TRIANGULAR, determines which half.
+  //
+  // The user must ensure that the matrix m remains valid for the
+  // lifetime of this object.
+  static InnerProductComputer* Create(
+      const BlockSparseMatrix& m,
+      CompressedRowSparseMatrix::StorageType storage_type);
+
+  // This factory method allows the user control over range of row
+  // blocks of m that should be used to compute the inner product.
+  //
+  // a = m(start_row_block : end_row_block, :);
+  // result = a' * a;
+  static InnerProductComputer* Create(
+      const BlockSparseMatrix& m,
+      int start_row_block,
+      int end_row_block,
+      CompressedRowSparseMatrix::StorageType storage_type);
+
+  // Update result_ to be numerically equal to m' * m.
+  void Compute();
+
+  // Accessors for the result containing the inner product.
+  //
+  // Compute must be called before accessing this result for
+  // the first time.
+  const CompressedRowSparseMatrix& result() const { return *result_; }
+  CompressedRowSparseMatrix* mutable_result() const { return result_.get(); }
+
+ private:
+  // A ProductTerm is a term in the block inner product of a matrix
+  // with itself.
+  struct ProductTerm {
+    ProductTerm(const int row, const int col, const int index)
+        : row(row), col(col), index(index) {}
+
+    bool operator<(const ProductTerm& right) const {
+      if (row == right.row) {
+        if (col == right.col) {
+          return index < right.index;
+        }
+        return col < right.col;
+      }
+      return row < right.row;
+    }
+
+    int row;
+    int col;
+    int index;
+  };
+
+  InnerProductComputer(const BlockSparseMatrix& m,
+                       int start_row_block,
+                       int end_row_block);
+
+  void Init(CompressedRowSparseMatrix::StorageType storage_type);
+
+  CompressedRowSparseMatrix* CreateResultMatrix(
+      const CompressedRowSparseMatrix::StorageType storage_type,
+      int num_nonzeros);
+
+  int ComputeNonzeros(const std::vector<ProductTerm>& product_terms,
+                      std::vector<int>* row_block_nnz);
+
+  void ComputeOffsetsAndCreateResultMatrix(
+      const CompressedRowSparseMatrix::StorageType storage_type,
+      const std::vector<ProductTerm>& product_terms);
+
+  const BlockSparseMatrix& m_;
+  const int start_row_block_;
+  const int end_row_block_;
+  std::unique_ptr<CompressedRowSparseMatrix> result_;
+
+  // For each term in the inner product, result_offsets_ contains the
+  // location in the values array of the result_ matrix where it
+  // should be stored.
+  //
+  // This is the principal lookup table that allows this class to
+  // compute the inner product fast.
+  std::vector<int> result_offsets_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_INNER_PRODUCT_COMPUTER_H_
diff --git a/internal/ceres/inner_product_computer_test.cc b/internal/ceres/inner_product_computer_test.cc
new file mode 100644
index 0000000..31cd829
--- /dev/null
+++ b/internal/ceres/inner_product_computer_test.cc
@@ -0,0 +1,226 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/inner_product_computer.h"
+
+#include <memory>
+#include <numeric>
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/random.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+#include "Eigen/SparseCore"
+
+namespace ceres {
+namespace internal {
+
+#define COMPUTE_AND_COMPARE                                                  \
+  {                                                                          \
+    inner_product_computer->Compute();                                       \
+    CompressedRowSparseMatrix* actual_product_crsm =                         \
+        inner_product_computer->mutable_result();                            \
+    Matrix actual_inner_product =                                            \
+        Eigen::MappedSparseMatrix<double, Eigen::ColMajor>(                  \
+            actual_product_crsm->num_rows(),                                 \
+            actual_product_crsm->num_rows(),                                 \
+            actual_product_crsm->num_nonzeros(),                             \
+            actual_product_crsm->mutable_rows(),                             \
+            actual_product_crsm->mutable_cols(),                             \
+            actual_product_crsm->mutable_values());                          \
+    EXPECT_EQ(actual_inner_product.rows(), actual_inner_product.cols());     \
+    EXPECT_EQ(expected_inner_product.rows(), expected_inner_product.cols()); \
+    EXPECT_EQ(actual_inner_product.rows(), expected_inner_product.rows());   \
+    Matrix expected_t, actual_t;                                             \
+    if (actual_product_crsm->storage_type() ==                               \
+        CompressedRowSparseMatrix::LOWER_TRIANGULAR) {                       \
+      expected_t = expected_inner_product.triangularView<Eigen::Upper>();    \
+      actual_t = actual_inner_product.triangularView<Eigen::Upper>();        \
+    } else {                                                                 \
+      expected_t = expected_inner_product.triangularView<Eigen::Lower>();    \
+      actual_t = actual_inner_product.triangularView<Eigen::Lower>();        \
+    }                                                                        \
+    EXPECT_LE((expected_t - actual_t).norm() / actual_t.norm(),              \
+              100 * std::numeric_limits<double>::epsilon())                  \
+        << "expected: \n"                                                    \
+        << expected_t << "\nactual: \n"                                      \
+        << actual_t;                                                         \
+  }
+
+TEST(InnerProductComputer, NormalOperation) {
+  // "Randomly generated seed."
+  SetRandomState(29823);
+  const int kMaxNumRowBlocks = 10;
+  const int kMaxNumColBlocks = 10;
+  const int kNumTrials = 10;
+
+  // Create a random matrix, compute its inner product using Eigen and
+  // InnerProductComputer. Convert both results to dense matrices and
+  // compare their triangular parts.
+  for (int num_row_blocks = 1; num_row_blocks < kMaxNumRowBlocks;
+       ++num_row_blocks) {
+    for (int num_col_blocks = 1; num_col_blocks < kMaxNumColBlocks;
+         ++num_col_blocks) {
+      for (int trial = 0; trial < kNumTrials; ++trial) {
+        BlockSparseMatrix::RandomMatrixOptions options;
+        options.num_row_blocks = num_row_blocks;
+        options.num_col_blocks = num_col_blocks;
+        options.min_row_block_size = 1;
+        options.max_row_block_size = 5;
+        options.min_col_block_size = 1;
+        options.max_col_block_size = 10;
+        options.block_density = std::max(0.1, RandDouble());
+
+        VLOG(2) << "num row blocks: " << options.num_row_blocks;
+        VLOG(2) << "num col blocks: " << options.num_col_blocks;
+        VLOG(2) << "min row block size: " << options.min_row_block_size;
+        VLOG(2) << "max row block size: " << options.max_row_block_size;
+        VLOG(2) << "min col block size: " << options.min_col_block_size;
+        VLOG(2) << "max col block size: " << options.max_col_block_size;
+        VLOG(2) << "block density: " << options.block_density;
+
+        std::unique_ptr<BlockSparseMatrix> random_matrix(
+            BlockSparseMatrix::CreateRandomMatrix(options));
+
+        TripletSparseMatrix tsm(random_matrix->num_rows(),
+                                random_matrix->num_cols(),
+                                random_matrix->num_nonzeros());
+        random_matrix->ToTripletSparseMatrix(&tsm);
+        std::vector<Eigen::Triplet<double>> triplets;
+        for (int i = 0; i < tsm.num_nonzeros(); ++i) {
+          triplets.push_back(Eigen::Triplet<double>(
+              tsm.rows()[i], tsm.cols()[i], tsm.values()[i]));
+        }
+        Eigen::SparseMatrix<double> eigen_random_matrix(
+            random_matrix->num_rows(), random_matrix->num_cols());
+        eigen_random_matrix.setFromTriplets(triplets.begin(), triplets.end());
+        Matrix expected_inner_product =
+            eigen_random_matrix.transpose() * eigen_random_matrix;
+
+        std::unique_ptr<InnerProductComputer> inner_product_computer;
+
+        inner_product_computer.reset(InnerProductComputer::Create(
+            *random_matrix, CompressedRowSparseMatrix::LOWER_TRIANGULAR));
+        COMPUTE_AND_COMPARE;
+        inner_product_computer.reset(InnerProductComputer::Create(
+            *random_matrix, CompressedRowSparseMatrix::UPPER_TRIANGULAR));
+        COMPUTE_AND_COMPARE;
+      }
+    }
+  }
+}
+
+TEST(InnerProductComputer, SubMatrix) {
+  // "Randomly generated seed."
+  SetRandomState(29823);
+  const int kNumRowBlocks = 10;
+  const int kNumColBlocks = 20;
+  const int kNumTrials = 5;
+
+  // Create a random matrix, compute the inner product of a submatrix of
+  // it using Eigen and InnerProductComputer. Convert both results to
+  // dense matrices and compare their triangular parts.
+  for (int trial = 0; trial < kNumTrials; ++trial) {
+    BlockSparseMatrix::RandomMatrixOptions options;
+    options.num_row_blocks = kNumRowBlocks;
+    options.num_col_blocks = kNumColBlocks;
+    options.min_row_block_size = 1;
+    options.max_row_block_size = 5;
+    options.min_col_block_size = 1;
+    options.max_col_block_size = 10;
+    options.block_density = std::max(0.1, RandDouble());
+
+    VLOG(2) << "num row blocks: " << options.num_row_blocks;
+    VLOG(2) << "num col blocks: " << options.num_col_blocks;
+    VLOG(2) << "min row block size: " << options.min_row_block_size;
+    VLOG(2) << "max row block size: " << options.max_row_block_size;
+    VLOG(2) << "min col block size: " << options.min_col_block_size;
+    VLOG(2) << "max col block size: " << options.max_col_block_size;
+    VLOG(2) << "block density: " << options.block_density;
+
+    std::unique_ptr<BlockSparseMatrix> random_matrix(
+        BlockSparseMatrix::CreateRandomMatrix(options));
+
+    const std::vector<CompressedRow>& row_blocks =
+        random_matrix->block_structure()->rows;
+    const int num_row_blocks = row_blocks.size();
+
+    for (int start_row_block = 0; start_row_block < num_row_blocks - 1;
+         ++start_row_block) {
+      for (int end_row_block = start_row_block + 1;
+           end_row_block < num_row_blocks;
+           ++end_row_block) {
+        const int start_row = row_blocks[start_row_block].block.position;
+        const int end_row = row_blocks[end_row_block].block.position;
+
+        TripletSparseMatrix tsm(random_matrix->num_rows(),
+                                random_matrix->num_cols(),
+                                random_matrix->num_nonzeros());
+        random_matrix->ToTripletSparseMatrix(&tsm);
+        std::vector<Eigen::Triplet<double>> triplets;
+        for (int i = 0; i < tsm.num_nonzeros(); ++i) {
+          if (tsm.rows()[i] >= start_row && tsm.rows()[i] < end_row) {
+            triplets.push_back(Eigen::Triplet<double>(
+                tsm.rows()[i], tsm.cols()[i], tsm.values()[i]));
+          }
+        }
+
+        Eigen::SparseMatrix<double> eigen_random_matrix(
+            random_matrix->num_rows(), random_matrix->num_cols());
+        eigen_random_matrix.setFromTriplets(triplets.begin(), triplets.end());
+
+        Matrix expected_inner_product =
+            eigen_random_matrix.transpose() * eigen_random_matrix;
+
+        std::unique_ptr<InnerProductComputer> inner_product_computer;
+        inner_product_computer.reset(InnerProductComputer::Create(
+            *random_matrix,
+            start_row_block,
+            end_row_block,
+            CompressedRowSparseMatrix::LOWER_TRIANGULAR));
+        COMPUTE_AND_COMPARE;
+        inner_product_computer.reset(InnerProductComputer::Create(
+            *random_matrix,
+            start_row_block,
+            end_row_block,
+            CompressedRowSparseMatrix::UPPER_TRIANGULAR));
+        COMPUTE_AND_COMPARE;
+      }
+    }
+  }
+}
+
+#undef COMPUTE_AND_COMPARE
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/integer_sequence_algorithm_test.cc b/internal/ceres/integer_sequence_algorithm_test.cc
new file mode 100644
index 0000000..a6c85d0
--- /dev/null
+++ b/internal/ceres/integer_sequence_algorithm_test.cc
@@ -0,0 +1,71 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: jodebo_beck@gmx.de (Johannes Beck)
+
+#include "ceres/internal/integer_sequence_algorithm.h"
+
+#include <type_traits>
+
+namespace ceres {
+namespace internal {
+
+// Unit tests for summation of integer sequence.
+static_assert(Sum<integer_sequence<int>>::Value == 0,
+              "Unit test of summing up an integer sequence failed.");
+static_assert(Sum<integer_sequence<int, 2>>::Value == 2,
+              "Unit test of summing up an integer sequence failed.");
+static_assert(Sum<integer_sequence<int, 2, 3>>::Value == 5,
+              "Unit test of summing up an integer sequence failed.");
+static_assert(Sum<integer_sequence<int, 2, 3, 10>>::Value == 15,
+              "Unit test of summing up an integer sequence failed.");
+static_assert(Sum<integer_sequence<int, 2, 3, 10, 4>>::Value == 19,
+              "Unit test of summing up an integer sequence failed.");
+static_assert(Sum<integer_sequence<int, 2, 3, 10, 4, 1>>::Value == 20,
+              "Unit test of summing up an integer sequence failed.");
+
+// Unit tests for exclusive scan of integer sequence.
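+// (The exclusive scan of a sequence (a, b, c, ...) is (0, a, a + b, ...),
+// i.e. each element is the sum of all preceding elements.)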
+static_assert(std::is_same<ExclusiveScan<integer_sequence<int>>,
+                           integer_sequence<int>>::value,
+              "Unit test of calculating the exclusive scan of an integer "
+              "sequence failed.");
+static_assert(std::is_same<ExclusiveScan<integer_sequence<int, 2>>,
+                           integer_sequence<int, 0>>::value,
+              "Unit test of calculating the exclusive scan of an integer "
+              "sequence failed.");
+static_assert(std::is_same<ExclusiveScan<integer_sequence<int, 2, 1>>,
+                           integer_sequence<int, 0, 2>>::value,
+              "Unit test of calculating the exclusive scan of an integer "
+              "sequence failed.");
+static_assert(std::is_same<ExclusiveScan<integer_sequence<int, 2, 1, 10>>,
+                           integer_sequence<int, 0, 2, 3>>::value,
+              "Unit test of calculating the exclusive scan of an integer "
+              "sequence failed.");
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/integer_sequence_test.cc b/internal/ceres/integer_sequence_test.cc
new file mode 100644
index 0000000..ab3559a
--- /dev/null
+++ b/internal/ceres/integer_sequence_test.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: jodebo_beck@gmx.de (Johannes Beck)
+
+#include "ceres/internal/integer_sequence.h"
+
+#include <type_traits>
+
+namespace ceres {
+namespace internal {
+
+// Unit test for integer_sequence<...>::value_type
+static_assert(std::is_same<integer_sequence<unsigned int, 0>::value_type,
+                           unsigned int>::value,
+              "Unit test of integer sequence value type failed.");
+
+// Unit tests for make_integer_sequence
+static_assert(
+    std::is_same<make_integer_sequence<int, 0>, integer_sequence<int>>::value,
+    "Unit test of make integer sequence failed.");
+static_assert(std::is_same<make_integer_sequence<int, 1>,
+                           integer_sequence<int, 0>>::value,
+              "Unit test of make integer sequence failed.");
+static_assert(std::is_same<make_integer_sequence<int, 2>,
+                           integer_sequence<int, 0, 1>>::value,
+              "Unit test of make integer sequence failed.");
+static_assert(std::is_same<make_integer_sequence<int, 3>,
+                           integer_sequence<int, 0, 1, 2>>::value,
+              "Unit test of make integer sequence failed.");
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/invert_psd_matrix.h b/internal/ceres/invert_psd_matrix.h
new file mode 100644
index 0000000..2319fea
--- /dev/null
+++ b/internal/ceres/invert_psd_matrix.h
@@ -0,0 +1,78 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_INVERT_PSD_MATRIX_H_
+#define CERES_INTERNAL_INVERT_PSD_MATRIX_H_
+
+#include "ceres/internal/eigen.h"
+#include "glog/logging.h"
+#include "Eigen/Dense"
+
+namespace ceres {
+namespace internal {
+
+// Helper routine to compute the inverse or pseudo-inverse of a
+// symmetric positive semi-definite matrix.
+//
+// assume_full_rank controls whether a Cholesky factorization (for the
+// inverse) or a Singular Value Decomposition (for the pseudo-inverse)
+// is used.
+//
+// The template parameter kSize can either be Eigen::Dynamic or a
+// positive integer equal to the number of rows of m.
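+//
+// A brief usage sketch (illustrative; here m is assumed to be a 3x3
+// symmetric positive semi-definite Matrix):
+//
+//   const Matrix inverse = InvertPSDMatrix<3>(true /* assume_full_rank */, m);
+//   const Matrix pseudo_inverse = InvertPSDMatrix<Eigen::Dynamic>(false, m);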
+template <int kSize>
+typename EigenTypes<kSize, kSize>::Matrix InvertPSDMatrix(
+    const bool assume_full_rank,
+    const typename EigenTypes<kSize, kSize>::Matrix& m) {
+  const int size = m.rows();
+
+  // If the matrix can be assumed to be full rank, then just use the
+  // Cholesky factorization to invert it.
+  if (assume_full_rank) {
+    return m.template selfadjointView<Eigen::Upper>().llt().solve(
+        Matrix::Identity(size, size));
+  }
+
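+  // Otherwise compute the Moore-Penrose pseudo-inverse via an SVD,
+  // treating singular values below a size and scale dependent
+  // tolerance as zero.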
+  Eigen::JacobiSVD<Matrix> svd(m, Eigen::ComputeThinU | Eigen::ComputeThinV);
+  const double tolerance =
+      std::numeric_limits<double>::epsilon() * size * svd.singularValues()(0);
+
+  return svd.matrixV() *
+         (svd.singularValues().array() > tolerance)
+             .select(svd.singularValues().array().inverse(), 0)
+             .matrix()
+             .asDiagonal() *
+         svd.matrixU().adjoint();
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif // CERES_INTERNAL_INVERT_PSD_MATRIX_H_
diff --git a/internal/ceres/invert_psd_matrix_test.cc b/internal/ceres/invert_psd_matrix_test.cc
new file mode 100644
index 0000000..5da9c11
--- /dev/null
+++ b/internal/ceres/invert_psd_matrix_test.cc
@@ -0,0 +1,86 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/invert_psd_matrix.h"
+
+#include "ceres/internal/eigen.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+static const bool kFullRank = true;
+static const bool kRankDeficient = false;
+
+template <int kSize>
+typename EigenTypes<kSize, kSize>::Matrix RandomPSDMatrixWithEigenValues(
+    const typename EigenTypes<kSize>::Vector& eigenvalues) {
+  typename EigenTypes<kSize, kSize>::Matrix m;
+  m.setRandom();
+  Eigen::SelfAdjointEigenSolver<typename EigenTypes<kSize, kSize>::Matrix> es(
+      m);
+  return es.eigenvectors() * eigenvalues.asDiagonal() *
+         es.eigenvectors().transpose();
+}
+
+TEST(InvertPSDMatrix, Identity3x3) {
+  const Matrix m = Matrix::Identity(3, 3);
+  const Matrix inverse_m = InvertPSDMatrix<3>(kFullRank, m);
+  EXPECT_NEAR((inverse_m - m).norm() / m.norm(),
+              0.0,
+              std::numeric_limits<double>::epsilon());
+}
+
+TEST(InvertPSDMatrix, FullRank5x5) {
+  EigenTypes<5>::Vector eigenvalues;
+  eigenvalues.setRandom();
+  eigenvalues = eigenvalues.array().abs().matrix();
+  const Matrix m = RandomPSDMatrixWithEigenValues<5>(eigenvalues);
+  const Matrix inverse_m = InvertPSDMatrix<5>(kFullRank, m);
+  EXPECT_NEAR((m * inverse_m - Matrix::Identity(5,5)).norm() / 5.0,  0.0,
+              std::numeric_limits<double>::epsilon());
+}
+
+TEST(InvertPSDMatrix, RankDeficient5x5) {
+  EigenTypes<5>::Vector eigenvalues;
+  eigenvalues.setRandom();
+  eigenvalues = eigenvalues.array().abs().matrix();
+  eigenvalues(3) = 0.0;
+  const Matrix m = RandomPSDMatrixWithEigenValues<5>(eigenvalues);
+  const Matrix inverse_m = InvertPSDMatrix<5>(kRankDeficient, m);
+  Matrix pseudo_identity = Matrix::Identity(5, 5);
+  pseudo_identity(3, 3) = 0.0;
+  EXPECT_NEAR((m * inverse_m * m - m).norm() / m.norm(),
+              0.0,
+              10 * std::numeric_limits<double>::epsilon());
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/is_close.cc b/internal/ceres/is_close.cc
new file mode 100644
index 0000000..a91a174
--- /dev/null
+++ b/internal/ceres/is_close.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2016 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: keir@google.com (Keir Mierle), dgossow@google.com (David Gossow)
+
+#include "ceres/is_close.h"
+
+#include <algorithm>
+#include <cmath>
+
+namespace ceres {
+namespace internal {
+bool IsClose(double x, double y, double relative_precision,
+             double *relative_error,
+             double *absolute_error) {
+  double local_absolute_error;
+  double local_relative_error;
+  if (!absolute_error) {
+    absolute_error = &local_absolute_error;
+  }
+  if (!relative_error) {
+    relative_error = &local_relative_error;
+  }
+  *absolute_error = std::fabs(x - y);
+  *relative_error = *absolute_error / std::max(std::fabs(x), std::fabs(y));
+  if (x == 0 || y == 0) {
+    // If x or y is exactly zero, then relative difference doesn't have any
+    // meaning. Take the absolute difference instead.
+    *relative_error = *absolute_error;
+  }
+  return *relative_error < std::fabs(relative_precision);
+}
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/is_close.h b/internal/ceres/is_close.h
new file mode 100644
index 0000000..7789448
--- /dev/null
+++ b/internal/ceres/is_close.h
@@ -0,0 +1,51 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2016 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: keir@google.com (Keir Mierle), dgossow@google.com (David Gossow)
+//
+// Utility routine for comparing two values.
+
+#ifndef CERES_INTERNAL_IS_CLOSE_H_
+#define CERES_INTERNAL_IS_CLOSE_H_
+
+namespace ceres {
+namespace internal {
+// Returns true if x and y have a relative (unsigned) difference less than
+// relative_precision and false otherwise. Stores the relative and absolute
+// difference in relative/absolute_error if non-NULL. If one of the two values
+// is exactly zero, the absolute difference will be compared, and relative_error
+// will be set to the absolute difference.
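+//
+// For example (a minimal sketch):
+//
+//   double relative_error = 0.0, absolute_error = 0.0;
+//   const bool close =
+//       IsClose(1.0, 1.0 + 1e-10, 1e-9, &relative_error, &absolute_error);
+//   // close is true; both errors are approximately 1e-10.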
+bool IsClose(double x,
+             double y,
+             double relative_precision,
+             double *relative_error,
+             double *absolute_error);
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_IS_CLOSE_H_
diff --git a/internal/ceres/is_close_test.cc b/internal/ceres/is_close_test.cc
new file mode 100644
index 0000000..8f7aaba
--- /dev/null
+++ b/internal/ceres/is_close_test.cc
@@ -0,0 +1,177 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: dgossow@google.com (David Gossow)
+//
+// This file contains tests for the IsClose function.
+
+#include "ceres/is_close.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+const double kTolerance = 1e-9;
+
+TEST(IsClose, BothParametersPositive) {
+  double relative_error = -1;
+  double absolute_error = -1;
+
+  // Test cases where both values are positive.
+  EXPECT_TRUE(IsClose(9.9, 10.0, 0.011, &relative_error, &absolute_error));
+  EXPECT_NEAR(relative_error, 0.01, kTolerance);
+  EXPECT_NEAR(absolute_error, 0.1, kTolerance);
+  relative_error = -1;
+  absolute_error = -1;
+  EXPECT_TRUE(IsClose(10.0, 9.9, 0.011, &relative_error, &absolute_error));
+  EXPECT_NEAR(relative_error, 0.01, kTolerance);
+  EXPECT_NEAR(absolute_error, 0.1, kTolerance);
+  relative_error = -1;
+  absolute_error = -1;
+
+  EXPECT_FALSE(IsClose(9.9, 10.0, 0.009, &relative_error, &absolute_error));
+  EXPECT_NEAR(relative_error, 0.01, kTolerance);
+  EXPECT_NEAR(absolute_error, 0.1, kTolerance);
+  relative_error = -1;
+  absolute_error = -1;
+  EXPECT_FALSE(IsClose(10.0, 9.9, 0.009, &relative_error, &absolute_error));
+  EXPECT_NEAR(relative_error, 0.01, kTolerance);
+  EXPECT_NEAR(absolute_error, 0.1, kTolerance);
+}
+
+TEST(IsClose, BothParametersNegative) {
+  double relative_error = -1;
+  double absolute_error = -1;
+
+  // Test cases where both values are negative.
+  EXPECT_TRUE(IsClose(-9.9, -10.0, 0.011, &relative_error, &absolute_error));
+  EXPECT_NEAR(relative_error, 0.01, kTolerance);
+  EXPECT_NEAR(absolute_error, 0.1, kTolerance);
+  relative_error = -1;
+  absolute_error = -1;
+  EXPECT_TRUE(IsClose(-10.0, -9.9, 0.011, &relative_error, &absolute_error));
+  EXPECT_NEAR(relative_error, 0.01, kTolerance);
+  EXPECT_NEAR(absolute_error, 0.1, kTolerance);
+  relative_error = -1;
+  absolute_error = -1;
+
+  EXPECT_FALSE(IsClose(-9.9, -10.0, 0.009, &relative_error, &absolute_error));
+  EXPECT_NEAR(relative_error, 0.01, kTolerance);
+  EXPECT_NEAR(absolute_error, 0.1, kTolerance);
+  relative_error = -1;
+  absolute_error = -1;
+  EXPECT_FALSE(IsClose(-10.0, -9.9, 0.009, &relative_error, &absolute_error));
+  EXPECT_NEAR(relative_error, 0.01, kTolerance);
+  EXPECT_NEAR(absolute_error, 0.1, kTolerance);
+}
+
+TEST(IsClose, ParametersHaveMixedSigns) {
+  double relative_error = -1;
+  double absolute_error = -1;
+
+  // Test cases with mixed signs.
+  EXPECT_FALSE(IsClose(-0.1, 0.1, 1.99, &relative_error, &absolute_error));
+  EXPECT_NEAR(relative_error, 2.0, kTolerance);
+  EXPECT_NEAR(absolute_error, 0.2, kTolerance);
+  relative_error = -1;
+  absolute_error = -1;
+  EXPECT_TRUE(IsClose(-0.1, 0.1, 2.01, &relative_error, &absolute_error));
+  EXPECT_NEAR(relative_error, 2.0, kTolerance);
+  EXPECT_NEAR(absolute_error, 0.2, kTolerance);
+  relative_error = -1;
+  absolute_error = -1;
+  EXPECT_FALSE(IsClose(0.1, -0.1, 1.99, &relative_error, &absolute_error));
+  EXPECT_NEAR(relative_error, 2.0, kTolerance);
+  EXPECT_NEAR(absolute_error, 0.2, kTolerance);
+  relative_error = -1;
+  absolute_error = -1;
+  EXPECT_TRUE(IsClose(0.1, -0.1, 2.01, &relative_error, &absolute_error));
+  EXPECT_NEAR(relative_error, 2.0, kTolerance);
+  EXPECT_NEAR(absolute_error, 0.2, kTolerance);
+}
+
+TEST(IsClose, OneParameterZero) {
+  double relative_error = -1;
+  double absolute_error = -1;
+
+  // Test cases where one of the values is zero.
+  EXPECT_TRUE(IsClose(0.0, 10.0, 10.1, &relative_error, &absolute_error));
+  EXPECT_NEAR(relative_error, 10.0, kTolerance);
+  EXPECT_NEAR(absolute_error, 10.0, kTolerance);
+  relative_error = -1;
+  absolute_error = -1;
+  EXPECT_TRUE(IsClose(10.0, 0.0, 10.1, &relative_error, &absolute_error));
+  EXPECT_NEAR(relative_error, 10.0, kTolerance);
+  EXPECT_NEAR(absolute_error, 10.0, kTolerance);
+  relative_error = -1;
+  absolute_error = -1;
+  EXPECT_TRUE(IsClose(0.0, -10.0, 10.1, &relative_error, &absolute_error));
+  EXPECT_NEAR(relative_error, 10.0, kTolerance);
+  EXPECT_NEAR(absolute_error, 10.0, kTolerance);
+  relative_error = -1;
+  absolute_error = -1;
+  EXPECT_TRUE(IsClose(-10.0, 0.0, 10.1, &relative_error, &absolute_error));
+  EXPECT_NEAR(relative_error, 10.0, kTolerance);
+  EXPECT_NEAR(absolute_error, 10.0, kTolerance);
+  relative_error = -1;
+  absolute_error = -1;
+
+  EXPECT_FALSE(IsClose(0, 10.0, 9.9, &relative_error, &absolute_error));
+  EXPECT_NEAR(relative_error, 10.0, kTolerance);
+  EXPECT_NEAR(absolute_error, 10.0, kTolerance);
+  relative_error = -1;
+  absolute_error = -1;
+  EXPECT_FALSE(IsClose(10.0, 0.0, 9.9, &relative_error, &absolute_error));
+  EXPECT_NEAR(relative_error, 10.0, kTolerance);
+  EXPECT_NEAR(absolute_error, 10.0, kTolerance);
+  relative_error = -1;
+  absolute_error = -1;
+  EXPECT_FALSE(IsClose(0, -10.0, 9.9, &relative_error, &absolute_error));
+  EXPECT_NEAR(relative_error, 10.0, kTolerance);
+  EXPECT_NEAR(absolute_error, 10.0, kTolerance);
+  relative_error = -1;
+  absolute_error = -1;
+  EXPECT_FALSE(IsClose(-10.0, 0.0, 9.9, &relative_error, &absolute_error));
+  EXPECT_NEAR(relative_error, 10.0, kTolerance);
+  EXPECT_NEAR(absolute_error, 10.0, kTolerance);
+}
+
+TEST(IsClose, BothParametersZero) {
+  double relative_error = -1;
+  double absolute_error = -1;
+  EXPECT_TRUE(IsClose(0.0, 0.0, 0.1, &relative_error, &absolute_error));
+  EXPECT_NEAR(relative_error, 0.0, kTolerance);
+  EXPECT_NEAR(absolute_error, 0.0, kTolerance);
+  relative_error = -1;
+  absolute_error = -1;
+  EXPECT_FALSE(IsClose(0.0, 0.0, 0.0, &relative_error, &absolute_error));
+  EXPECT_NEAR(relative_error, 0.0, kTolerance);
+  EXPECT_NEAR(absolute_error, 0.0, kTolerance);
+}
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/iterative_refiner.cc b/internal/ceres/iterative_refiner.cc
new file mode 100644
index 0000000..fb0e45b
--- /dev/null
+++ b/internal/ceres/iterative_refiner.cc
@@ -0,0 +1,74 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <string>
+#include "ceres/iterative_refiner.h"
+
+#include "Eigen/Core"
+#include "ceres/sparse_cholesky.h"
+#include "ceres/sparse_matrix.h"
+
+namespace ceres {
+namespace internal {
+
+IterativeRefiner::IterativeRefiner(const int max_num_iterations)
+    : max_num_iterations_(max_num_iterations) {}
+
+IterativeRefiner::~IterativeRefiner() {}
+
+void IterativeRefiner::Allocate(int num_cols) {
+  residual_.resize(num_cols);
+  correction_.resize(num_cols);
+  lhs_x_solution_.resize(num_cols);
+}
+
+void IterativeRefiner::Refine(const SparseMatrix& lhs,
+                              const double* rhs_ptr,
+                              SparseCholesky* sparse_cholesky,
+                              double* solution_ptr) {
+  const int num_cols = lhs.num_cols();
+  Allocate(num_cols);
+  ConstVectorRef rhs(rhs_ptr, num_cols);
+  VectorRef solution(solution_ptr, num_cols);
+  for (int i = 0; i < max_num_iterations_; ++i) {
+    // residual = rhs - lhs * solution
+    lhs_x_solution_.setZero();
+    lhs.RightMultiply(solution_ptr, lhs_x_solution_.data());
+    residual_ = rhs - lhs_x_solution_;
+    // solution += lhs^-1 residual
+    std::string ignored_message;
+    sparse_cholesky->Solve(
+        residual_.data(), correction_.data(), &ignored_message);
+    solution += correction_;
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/iterative_refiner.h b/internal/ceres/iterative_refiner.h
new file mode 100644
index 0000000..f969935
--- /dev/null
+++ b/internal/ceres/iterative_refiner.h
@@ -0,0 +1,93 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_ITERATIVE_REFINER_H_
+#define CERES_INTERNAL_ITERATIVE_REFINER_H_
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+class SparseCholesky;
+class SparseMatrix;
+
+// Iterative refinement
+// (https://en.wikipedia.org/wiki/Iterative_refinement) is the process
+// of improving the solution to a linear system, by using the
+// following iteration.
+//
+// r_i = b - Ax_i
+// Ad_i = r_i
+// x_{i+1} = x_i + d_i
+//
+// IterativeRefiner implements this process for Symmetric Positive
+// Definite linear systems.
+//
+// The above iterative loop is run until max_num_iterations is reached.
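+//
+// As a minimal illustration only (this sketch is not part of the Ceres API;
+// it assumes a dense system, <Eigen/Dense>, and a precomputed Eigen::LLT
+// factorization of lhs named llt), the loop above amounts to:
+//
+//   Eigen::VectorXd RefineDense(const Eigen::MatrixXd& lhs,
+//                               const Eigen::VectorXd& rhs,
+//                               const Eigen::LLT<Eigen::MatrixXd>& llt,
+//                               Eigen::VectorXd x,
+//                               int max_num_iterations) {
+//     for (int i = 0; i < max_num_iterations; ++i) {
+//       const Eigen::VectorXd residual = rhs - lhs * x;  // r_i = b - A x_i
+//       x += llt.solve(residual);  // Solve A d_i = r_i and update x.
+//     }
+//     return x;
+//   }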
+class IterativeRefiner {
+ public:
+  // max_num_iterations is the number of refinement iterations to
+  // perform.
+  explicit IterativeRefiner(int max_num_iterations);
+
+  // Needed for mocking.
+  virtual ~IterativeRefiner();
+
+  // Given an initial estimate of the solution of lhs * x = rhs, use
+  // max_num_iterations rounds of iterative refinement to improve it.
+  //
+  // sparse_cholesky is assumed to contain an already computed
+  // factorization (or approximation thereof) of lhs.
+  //
+  // solution is expected to contain an approximation to the solution
+  // to lhs * x = rhs. It can be zero.
+  //
+  // This method is virtual to facilitate mocking.
+  virtual void Refine(const SparseMatrix& lhs,
+                      const double* rhs,
+                      SparseCholesky* sparse_cholesky,
+                      double* solution);
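+  //
+  // A hedged usage sketch (it assumes lhs, rhs and solution are already set
+  // up, and that sparse_cholesky is a std::unique_ptr<SparseCholesky> that
+  // holds a previously computed factorization of lhs):
+  //
+  //   IterativeRefiner refiner(/* max_num_iterations = */ 3);
+  //   refiner.Refine(lhs, rhs.data(), sparse_cholesky.get(), solution.data());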
+
+ private:
+  void Allocate(int num_cols);
+
+  int max_num_iterations_;
+  Vector residual_;
+  Vector correction_;
+  Vector lhs_x_solution_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_ITERATIVE_REFINER_H_
diff --git a/internal/ceres/iterative_refiner_test.cc b/internal/ceres/iterative_refiner_test.cc
new file mode 100644
index 0000000..7ca0a5e
--- /dev/null
+++ b/internal/ceres/iterative_refiner_test.cc
@@ -0,0 +1,173 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/iterative_refiner.h"
+
+#include "Eigen/Dense"
+#include "ceres/internal/eigen.h"
+#include "ceres/sparse_cholesky.h"
+#include "ceres/sparse_matrix.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+// Macros to help us define virtual methods which we do not expect to
+// use/call in this test.
+#define DO_NOT_CALL \
+  { LOG(FATAL) << "DO NOT CALL"; }
+#define DO_NOT_CALL_WITH_RETURN(x) \
+  {                                \
+    LOG(FATAL) << "DO NOT CALL";   \
+    return x;                      \
+  }
+
+// A fake SparseMatrix, which uses an Eigen matrix to do the real work.
+class FakeSparseMatrix : public SparseMatrix {
+ public:
+  explicit FakeSparseMatrix(const Matrix& m) : m_(m) {}
+  virtual ~FakeSparseMatrix() {}
+
+  // y += Ax
+  virtual void RightMultiply(const double* x, double* y) const {
+    VectorRef(y, m_.cols()) += m_ * ConstVectorRef(x, m_.cols());
+  }
+  // y += A'x
+  virtual void LeftMultiply(const double* x, double* y) const {
+    // We will assume that this is a symmetric matrix.
+    RightMultiply(x, y);
+  }
+
+  virtual double* mutable_values() { return m_.data(); }
+  virtual const double* values() const { return m_.data(); }
+  virtual int num_rows() const { return m_.rows(); }
+  virtual int num_cols() const { return m_.cols(); }
+  virtual int num_nonzeros() const { return m_.cols() * m_.cols(); }
+
+  // The following methods are not needed for tests in this file.
+  virtual void SquaredColumnNorm(double* x) const DO_NOT_CALL;
+  virtual void ScaleColumns(const double* scale) DO_NOT_CALL;
+  virtual void SetZero() DO_NOT_CALL;
+  virtual void ToDenseMatrix(Matrix* dense_matrix) const DO_NOT_CALL;
+  virtual void ToTextFile(FILE* file) const DO_NOT_CALL;
+
+ private:
+  Matrix m_;
+};
+
+// A fake SparseCholesky which uses Eigen's Cholesky factorization to
+// do the real work. The template parameter allows us to work in
+// doubles or floats, even though the source matrix is double.
+template <typename Scalar>
+class FakeSparseCholesky : public SparseCholesky {
+ public:
+  explicit FakeSparseCholesky(const Matrix& lhs) { lhs_ = lhs.cast<Scalar>(); }
+  virtual ~FakeSparseCholesky() {}
+
+  virtual LinearSolverTerminationType Solve(const double* rhs_ptr,
+                                            double* solution_ptr,
+                                            std::string* message) {
+    const int num_cols = lhs_.cols();
+    VectorRef solution(solution_ptr, num_cols);
+    ConstVectorRef rhs(rhs_ptr, num_cols);
+    solution = lhs_.llt().solve(rhs.cast<Scalar>()).template cast<double>();
+    return LINEAR_SOLVER_SUCCESS;
+  }
+
+  // The following methods are not needed for tests in this file.
+  virtual CompressedRowSparseMatrix::StorageType StorageType() const
+      DO_NOT_CALL_WITH_RETURN(CompressedRowSparseMatrix::UPPER_TRIANGULAR);
+  virtual LinearSolverTerminationType Factorize(CompressedRowSparseMatrix* lhs,
+                                                std::string* message)
+      DO_NOT_CALL_WITH_RETURN(LINEAR_SOLVER_FAILURE);
+
+  virtual LinearSolverTerminationType FactorAndSolve(
+      CompressedRowSparseMatrix* lhs,
+      const double* rhs,
+      double* solution,
+      std::string* message) DO_NOT_CALL_WITH_RETURN(LINEAR_SOLVER_FAILURE);
+
+ private:
+  Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> lhs_;
+};
+
+#undef DO_NOT_CALL
+#undef DO_NOT_CALL_WITH_RETURN
+
+class IterativeRefinerTest : public ::testing::Test {
+ public:
+  void SetUp() {
+    num_cols_ = 5;
+    max_num_iterations_ = 30;
+    Matrix m(num_cols_, num_cols_);
+    m.setRandom();
+    lhs_ = m * m.transpose();
+    solution_.resize(num_cols_);
+    solution_.setRandom();
+    rhs_ = lhs_ * solution_;
+  }
+
+ protected:
+  int num_cols_;
+  int max_num_iterations_;
+  Matrix lhs_;
+  Vector rhs_, solution_;
+};
+
+TEST_F(IterativeRefinerTest, RandomSolutionWithExactFactorizationConverges) {
+  FakeSparseMatrix lhs(lhs_);
+  FakeSparseCholesky<double> sparse_cholesky(lhs_);
+  IterativeRefiner refiner(max_num_iterations_);
+  Vector refined_solution(num_cols_);
+  refined_solution.setRandom();
+  refiner.Refine(lhs, rhs_.data(), &sparse_cholesky, refined_solution.data());
+  EXPECT_NEAR((lhs_ * refined_solution - rhs_).norm(),
+              0.0,
+              std::numeric_limits<double>::epsilon() * 10);
+}
+
+TEST_F(IterativeRefinerTest,
+       RandomSolutionWithApproximationFactorizationConverges) {
+  FakeSparseMatrix lhs(lhs_);
+  // Use a single precision Cholesky factorization of the double
+  // precision matrix. This will give us an approximate factorization.
+  FakeSparseCholesky<float> sparse_cholesky(lhs_);
+  IterativeRefiner refiner(max_num_iterations_);
+  Vector refined_solution(num_cols_);
+  refined_solution.setRandom();
+  refiner.Refine(lhs, rhs_.data(), &sparse_cholesky, refined_solution.data());
+  EXPECT_NEAR((lhs_ * refined_solution - rhs_).norm(),
+              0.0,
+              std::numeric_limits<double>::epsilon() * 10);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/iterative_schur_complement_solver.cc b/internal/ceres/iterative_schur_complement_solver.cc
new file mode 100644
index 0000000..6076c38
--- /dev/null
+++ b/internal/ceres/iterative_schur_complement_solver.cc
@@ -0,0 +1,176 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/iterative_schur_complement_solver.h"
+
+#include <algorithm>
+#include <cstring>
+#include <vector>
+
+#include "Eigen/Dense"
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/block_structure.h"
+#include "ceres/conjugate_gradients_solver.h"
+#include "ceres/detect_structure.h"
+#include "ceres/implicit_schur_complement.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/linear_solver.h"
+#include "ceres/preconditioner.h"
+#include "ceres/schur_jacobi_preconditioner.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/types.h"
+#include "ceres/visibility_based_preconditioner.h"
+#include "ceres/wall_time.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+IterativeSchurComplementSolver::IterativeSchurComplementSolver(
+    const LinearSolver::Options& options)
+    : options_(options) {
+}
+
+IterativeSchurComplementSolver::~IterativeSchurComplementSolver() {}
+
+LinearSolver::Summary IterativeSchurComplementSolver::SolveImpl(
+    BlockSparseMatrix* A,
+    const double* b,
+    const LinearSolver::PerSolveOptions& per_solve_options,
+    double* x) {
+  EventLogger event_logger("IterativeSchurComplementSolver::Solve");
+
+  CHECK(A->block_structure() != nullptr);
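+  // The first elimination group contains the parameter blocks ("e" blocks)
+  // that are eliminated to form the Schur complement.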
+  const int num_eliminate_blocks = options_.elimination_groups[0];
+  // Initialize an ImplicitSchurComplement object.
+  if (schur_complement_ == nullptr) {
+    DetectStructure(*(A->block_structure()),
+                    num_eliminate_blocks,
+                    &options_.row_block_size,
+                    &options_.e_block_size,
+                    &options_.f_block_size);
+    schur_complement_.reset(new ImplicitSchurComplement(options_));
+  }
+  schur_complement_->Init(*A, per_solve_options.D, b);
+
+  const int num_schur_complement_blocks =
+      A->block_structure()->cols.size() - num_eliminate_blocks;
+  if (num_schur_complement_blocks == 0) {
+    VLOG(2) << "No parameter blocks left in the schur complement.";
+    LinearSolver::Summary summary;
+    summary.num_iterations = 0;
+    summary.termination_type = LINEAR_SOLVER_SUCCESS;
+    schur_complement_->BackSubstitute(nullptr, x);
+    return summary;
+  }
+
+  // Initialize the solution to the Schur complement system to zero.
+  reduced_linear_system_solution_.resize(schur_complement_->num_rows());
+  reduced_linear_system_solution_.setZero();
+
+  LinearSolver::Options cg_options;
+  cg_options.min_num_iterations = options_.min_num_iterations;
+  cg_options.max_num_iterations = options_.max_num_iterations;
+  ConjugateGradientsSolver cg_solver(cg_options);
+
+  LinearSolver::PerSolveOptions cg_per_solve_options;
+  cg_per_solve_options.r_tolerance = per_solve_options.r_tolerance;
+  cg_per_solve_options.q_tolerance = per_solve_options.q_tolerance;
+
+  CreatePreconditioner(A);
+  if (preconditioner_.get() != nullptr) {
+    if (!preconditioner_->Update(*A, per_solve_options.D)) {
+      LinearSolver::Summary summary;
+      summary.num_iterations = 0;
+      summary.termination_type = LINEAR_SOLVER_FAILURE;
+      summary.message = "Preconditioner update failed.";
+      return summary;
+    }
+
+    cg_per_solve_options.preconditioner = preconditioner_.get();
+  }
+
+  event_logger.AddEvent("Setup");
+  LinearSolver::Summary summary =
+      cg_solver.Solve(schur_complement_.get(),
+                      schur_complement_->rhs().data(),
+                      cg_per_solve_options,
+                      reduced_linear_system_solution_.data());
+  if (summary.termination_type != LINEAR_SOLVER_FAILURE &&
+      summary.termination_type != LINEAR_SOLVER_FATAL_ERROR) {
+    schur_complement_->BackSubstitute(reduced_linear_system_solution_.data(),
+                                      x);
+  }
+  event_logger.AddEvent("Solve");
+  return summary;
+}
+
+void IterativeSchurComplementSolver::CreatePreconditioner(
+    BlockSparseMatrix* A) {
+  if (options_.preconditioner_type == IDENTITY ||
+      preconditioner_.get() != nullptr) {
+    return;
+  }
+
+  Preconditioner::Options preconditioner_options;
+  preconditioner_options.type = options_.preconditioner_type;
+  preconditioner_options.visibility_clustering_type =
+      options_.visibility_clustering_type;
+  preconditioner_options.sparse_linear_algebra_library_type =
+      options_.sparse_linear_algebra_library_type;
+  preconditioner_options.num_threads = options_.num_threads;
+  preconditioner_options.row_block_size = options_.row_block_size;
+  preconditioner_options.e_block_size = options_.e_block_size;
+  preconditioner_options.f_block_size = options_.f_block_size;
+  preconditioner_options.elimination_groups = options_.elimination_groups;
+  CHECK(options_.context != nullptr);
+  preconditioner_options.context = options_.context;
+
+  switch (options_.preconditioner_type) {
+    case JACOBI:
+      preconditioner_.reset(new SparseMatrixPreconditionerWrapper(
+          schur_complement_->block_diagonal_FtF_inverse()));
+      break;
+    case SCHUR_JACOBI:
+      preconditioner_.reset(new SchurJacobiPreconditioner(
+          *A->block_structure(), preconditioner_options));
+      break;
+    case CLUSTER_JACOBI:
+    case CLUSTER_TRIDIAGONAL:
+      preconditioner_.reset(new VisibilityBasedPreconditioner(
+          *A->block_structure(), preconditioner_options));
+      break;
+    default:
+      LOG(FATAL) << "Unknown Preconditioner Type";
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/iterative_schur_complement_solver.h b/internal/ceres/iterative_schur_complement_solver.h
new file mode 100644
index 0000000..c058f81
--- /dev/null
+++ b/internal/ceres/iterative_schur_complement_solver.h
@@ -0,0 +1,96 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_ITERATIVE_SCHUR_COMPLEMENT_SOLVER_H_
+#define CERES_INTERNAL_ITERATIVE_SCHUR_COMPLEMENT_SOLVER_H_
+
+#include <memory>
+#include "ceres/linear_solver.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+class BlockSparseMatrix;
+class ImplicitSchurComplement;
+class Preconditioner;
+
+// This class implements an iterative solver for linear least squares
+// problems that have the bipartite sparsity structure common to
+// Structure from Motion problems.
+//
+// The algorithm used by this solver was developed in a series of
+// papers - "Agarwal et al, Bundle Adjustment in the Large, ECCV 2010"
+// and "Wu et al, Multicore Bundle Adjustment, submitted to CVPR
+// 2011" at the University of Washington.
+//
+// The key idea is that one can run Conjugate Gradients on the Schur
+// Complement system without explicitly forming the Schur Complement
+// in memory. The heavy lifting for this is done by the
+// ImplicitSchurComplement class. Not forming the Schur complement in
+// memory and factoring it results in substantial savings in time and
+// memory. Further, iterative solvers like this open up the
+// possibility of solving the Newton equations in a non-linear solver
+// only approximately and terminating early, thereby saving even more
+// time.
+//
+// For the curious, running CG on the Schur complement is the same as
+// running CG on the Normal Equations with an SSOR preconditioner. For
+// a proof of this fact and others related to this solver please see
+// the section on Domain Decomposition Methods in Saad's book
+// "Iterative Methods for Sparse Linear Systems".
+class IterativeSchurComplementSolver : public BlockSparseMatrixSolver {
+ public:
+  explicit IterativeSchurComplementSolver(const LinearSolver::Options& options);
+  IterativeSchurComplementSolver(const IterativeSchurComplementSolver&) = delete;
+  void operator=(const IterativeSchurComplementSolver&) = delete;
+
+  virtual ~IterativeSchurComplementSolver();
+
+ private:
+  virtual LinearSolver::Summary SolveImpl(
+      BlockSparseMatrix* A,
+      const double* b,
+      const LinearSolver::PerSolveOptions& options,
+      double* x);
+
+  void CreatePreconditioner(BlockSparseMatrix* A);
+
+  LinearSolver::Options options_;
+  std::unique_ptr<internal::ImplicitSchurComplement> schur_complement_;
+  std::unique_ptr<Preconditioner> preconditioner_;
+  Vector reduced_linear_system_solution_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_ITERATIVE_SCHUR_COMPLEMENT_SOLVER_H_
diff --git a/internal/ceres/iterative_schur_complement_solver_test.cc b/internal/ceres/iterative_schur_complement_solver_test.cc
new file mode 100644
index 0000000..3bf2d92
--- /dev/null
+++ b/internal/ceres/iterative_schur_complement_solver_test.cc
@@ -0,0 +1,135 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// TODO(sameeragarwal): Add support for larger, more complicated and
+// poorly conditioned problems both for correctness testing as well as
+// benchmarking.
+
+#include "ceres/iterative_schur_complement_solver.h"
+
+#include <cstddef>
+#include <memory>
+#include "Eigen/Dense"
+#include "ceres/block_random_access_dense_matrix.h"
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/casts.h"
+#include "ceres/context_impl.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/linear_least_squares_problems.h"
+#include "ceres/linear_solver.h"
+#include "ceres/schur_eliminator.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+using testing::AssertionResult;
+
+const double kEpsilon = 1e-14;
+
+class IterativeSchurComplementSolverTest : public ::testing::Test {
+ protected:
+  void SetUpProblem(int problem_id) {
+    std::unique_ptr<LinearLeastSquaresProblem> problem(
+        CreateLinearLeastSquaresProblemFromId(problem_id));
+
+    CHECK(problem != nullptr);
+    A_.reset(down_cast<BlockSparseMatrix*>(problem->A.release()));
+    b_.reset(problem->b.release());
+    D_.reset(problem->D.release());
+
+    num_cols_ = A_->num_cols();
+    num_rows_ = A_->num_rows();
+    num_eliminate_blocks_ = problem->num_eliminate_blocks;
+  }
+
+  AssertionResult TestSolver(double* D) {
+    TripletSparseMatrix triplet_A(A_->num_rows(),
+                                  A_->num_cols(),
+                                  A_->num_nonzeros());
+    A_->ToTripletSparseMatrix(&triplet_A);
+
+    DenseSparseMatrix dense_A(triplet_A);
+
+    LinearSolver::Options options;
+    options.type = DENSE_QR;
+    ContextImpl context;
+    options.context = &context;
+    std::unique_ptr<LinearSolver> qr(LinearSolver::Create(options));
+
+    LinearSolver::PerSolveOptions per_solve_options;
+    per_solve_options.D = D;
+    Vector reference_solution(num_cols_);
+    qr->Solve(&dense_A, b_.get(), per_solve_options, reference_solution.data());
+
+    options.elimination_groups.push_back(num_eliminate_blocks_);
+    options.elimination_groups.push_back(0);
+    options.max_num_iterations = num_cols_;
+    options.preconditioner_type = SCHUR_JACOBI;
+    IterativeSchurComplementSolver isc(options);
+
+    Vector isc_sol(num_cols_);
+    per_solve_options.r_tolerance = 1e-12;
+    isc.Solve(A_.get(), b_.get(), per_solve_options, isc_sol.data());
+    double diff = (isc_sol - reference_solution).norm();
+    if (diff < kEpsilon) {
+      return testing::AssertionSuccess();
+    } else {
+      return testing::AssertionFailure()
+          << "The reference solution differs from the ITERATIVE_SCHUR"
+          << " solution by " << diff << " which is more than " << kEpsilon;
+    }
+  }
+
+  int num_rows_;
+  int num_cols_;
+  int num_eliminate_blocks_;
+  std::unique_ptr<BlockSparseMatrix> A_;
+  std::unique_ptr<double[]> b_;
+  std::unique_ptr<double[]> D_;
+};
+
+TEST_F(IterativeSchurComplementSolverTest, NormalProblem) {
+  SetUpProblem(2);
+  EXPECT_TRUE(TestSolver(nullptr));
+  EXPECT_TRUE(TestSolver(D_.get()));
+}
+
+TEST_F(IterativeSchurComplementSolverTest, ProblemWithNoFBlocks) {
+  SetUpProblem(3);
+  EXPECT_TRUE(TestSolver(nullptr));
+  EXPECT_TRUE(TestSolver(D_.get()));
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/jet_test.cc b/internal/ceres/jet_test.cc
new file mode 100644
index 0000000..6ae6ef7
--- /dev/null
+++ b/internal/ceres/jet_test.cc
@@ -0,0 +1,902 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include "ceres/jet.h"
+
+#include <Eigen/Dense>
+#include <algorithm>
+#include <cfloat>
+#include <cmath>
+#include <limits>
+
+#include "ceres/stringprintf.h"
+#include "ceres/test_util.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+#define VL VLOG(1)
+
+namespace ceres {
+namespace internal {
+
+const double kE = 2.71828182845904523536;
+
+typedef Jet<double, 2> J;
+
+// Convenient shorthand for making a jet.
+J MakeJet(double a, double v0, double v1) {
+  J z;
+  z.a = a;
+  z.v[0] = v0;
+  z.v[1] = v1;
+  return z;
+}
+
+// On a 32-bit optimized build, the mismatch is about 1.4e-14.
+double const kTolerance = 1e-13;
+
+void ExpectJetsClose(const J &x, const J &y) {
+  ExpectClose(x.a, y.a, kTolerance);
+  ExpectClose(x.v[0], y.v[0], kTolerance);
+  ExpectClose(x.v[1], y.v[1], kTolerance);
+}
+
+const double kStep = 1e-8;
+// Numerical differentiation is quite inexact, so use a loose tolerance.
+const double kNumericalTolerance = 1e-6;
+
+// Differentiate using Jet and confirm results with central-difference
+// numerical differentiation.
+template<typename Function>
+void NumericalTest(const char* name, const Function& f, const double x) {
+  const double exact_dx = f(MakeJet(x, 1.0, 0.0)).v[0];
+  const double estimated_dx =
+    (f(J(x + kStep)).a - f(J(x - kStep)).a) / (2.0 * kStep);
+  VL << name << "(" << x << "), exact dx: "
+     << exact_dx << ", estimated dx: " << estimated_dx;
+  ExpectClose(exact_dx, estimated_dx, kNumericalTolerance);
+}
+
+// Same as NumericalTest, but given a function taking two arguments.
+template<typename Function>
+void NumericalTest2(const char* name, const Function& f,
+                    const double x, const double y) {
+  const J exact_delta = f(MakeJet(x, 1.0, 0.0), MakeJet(y, 0.0, 1.0));
+  const double exact_dx = exact_delta.v[0];
+  const double exact_dy = exact_delta.v[1];
+
+  // Sanity check – these should be equivalent:
+  EXPECT_EQ(exact_dx, f(MakeJet(x, 1.0, 0.0), MakeJet(y, 0.0, 0.0)).v[0]);
+  EXPECT_EQ(exact_dx, f(MakeJet(x, 0.0, 1.0), MakeJet(y, 0.0, 0.0)).v[1]);
+  EXPECT_EQ(exact_dy, f(MakeJet(x, 0.0, 0.0), MakeJet(y, 1.0, 0.0)).v[0]);
+  EXPECT_EQ(exact_dy, f(MakeJet(x, 0.0, 0.0), MakeJet(y, 0.0, 1.0)).v[1]);
+
+  const double estimated_dx =
+    (f(J(x + kStep), J(y)).a - f(J(x - kStep), J(y)).a) / (2.0 * kStep);
+  const double estimated_dy =
+    (f(J(x), J(y + kStep)).a - f(J(x), J(y - kStep)).a) / (2.0 * kStep);
+  VL << name << "(" << x << ", " << y << "), exact dx: "
+     << exact_dx << ", estimated dx: " << estimated_dx;
+  ExpectClose(exact_dx, estimated_dx, kNumericalTolerance);
+  VL << name << "(" << x << ", " << y << "), exact dy: "
+     << exact_dy << ", estimated dy: " << estimated_dy;
+  ExpectClose(exact_dy, estimated_dy, kNumericalTolerance);
+}
+
+TEST(Jet, Jet) {
+  // Pick arbitrary values for x and y.
+  J x = MakeJet(2.3, -2.7, 1e-3);
+  J y = MakeJet(1.7,  0.5, 1e+2);
+
+  VL << "x = " << x;
+  VL << "y = " << y;
+
+  { // Check that log(exp(x)) == x.
+    J z = exp(x);
+    J w = log(z);
+    VL << "z = " << z;
+    VL << "w = " << w;
+    ExpectJetsClose(w, x);
+  }
+
+  { // Check that (x * y) / x == y.
+    J z = x * y;
+    J w = z / x;
+    VL << "z = " << z;
+    VL << "w = " << w;
+    ExpectJetsClose(w, y);
+  }
+
+  { // Check that sqrt(x * x) == x.
+    J z = x * x;
+    J w = sqrt(z);
+    VL << "z = " << z;
+    VL << "w = " << w;
+    ExpectJetsClose(w, x);
+  }
+
+  { // Check that sqrt(y) * sqrt(y) == y.
+    J z = sqrt(y);
+    J w = z * z;
+    VL << "z = " << z;
+    VL << "w = " << w;
+    ExpectJetsClose(w, y);
+  }
+
+  NumericalTest("sqrt", sqrt<double, 2>, 0.00001);
+  NumericalTest("sqrt", sqrt<double, 2>, 1.0);
+
+  { // Check that cos(2*x) = cos(x)^2 - sin(x)^2
+    J z = cos(J(2.0) * x);
+    J w = cos(x)*cos(x) - sin(x)*sin(x);
+    VL << "z = " << z;
+    VL << "w = " << w;
+    ExpectJetsClose(w, z);
+  }
+
+  { // Check that sin(2*x) = 2*cos(x)*sin(x)
+    J z = sin(J(2.0) * x);
+    J w = J(2.0)*cos(x)*sin(x);
+    VL << "z = " << z;
+    VL << "w = " << w;
+    ExpectJetsClose(w, z);
+  }
+
+  { // Check that cos(x)*cos(x) + sin(x)*sin(x) = 1
+    J z = cos(x) * cos(x);
+    J w = sin(x) * sin(x);
+    VL << "z = " << z;
+    VL << "w = " << w;
+    ExpectJetsClose(z + w, J(1.0));
+  }
+
+  { // Check that atan2(r*sin(t), r*cos(t)) = t.
+    J t = MakeJet(0.7, -0.3, +1.5);
+    J r = MakeJet(2.3, 0.13, -2.4);
+    VL << "t = " << t;
+    VL << "r = " << r;
+
+    J u = atan2(r * sin(t), r * cos(t));
+    VL << "u = " << u;
+
+    ExpectJetsClose(u, t);
+  }
+
+  { // Check that tan(x) = sin(x) / cos(x).
+    J z = tan(x);
+    J w = sin(x) / cos(x);
+    VL << "z = " << z;
+    VL << "w = " << w;
+    ExpectJetsClose(z, w);
+  }
+
+  { // Check that tan(atan(x)) = x.
+    J z = tan(atan(x));
+    J w = x;
+    VL << "z = " << z;
+    VL << "w = " << w;
+    ExpectJetsClose(z, w);
+  }
+
+  { // Check that cosh(x)*cosh(x) - sinh(x)*sinh(x) = 1
+    J z = cosh(x) * cosh(x);
+    J w = sinh(x) * sinh(x);
+    VL << "z = " << z;
+    VL << "w = " << w;
+    ExpectJetsClose(z - w, J(1.0));
+  }
+
+  { // Check that tanh(x + y) = (tanh(x) + tanh(y)) / (1 + tanh(x) tanh(y))
+    J z = tanh(x + y);
+    J w = (tanh(x) + tanh(y)) / (J(1.0) + tanh(x) * tanh(y));
+    VL << "z = " << z;
+    VL << "w = " << w;
+    ExpectJetsClose(z, w);
+  }
+
+  { // Check that pow(x, 1) == x.
+    VL << "x = " << x;
+
+    J u = pow(x, 1.);
+    VL << "u = " << u;
+
+    ExpectJetsClose(x, u);
+  }
+
+  { // Check that pow(x, y) == x for y == 1, with y a Jet.
+    J y = MakeJet(1, 0.0, 0.0);
+    VL << "x = " << x;
+    VL << "y = " << y;
+
+    J u = pow(x, y);
+    VL << "u = " << u;
+
+    ExpectJetsClose(x, u);
+  }
+
+  { // Check that pow(e, log(x)) == x, with the base passed as a double.
+    J logx = log(x);
+
+    VL << "x = " << x;
+    VL << "y = " << y;
+
+    J u = pow(kE, logx);
+    VL << "u = " << u;
+
+    ExpectJetsClose(x, u);
+  }
+
+  { // Check that pow(e, log(x)) == x, with the base passed as a Jet.
+    J logx = log(x);
+    J e = MakeJet(kE, 0., 0.);
+    VL << "x = " << x;
+    VL << "log(x) = " << logx;
+
+    J u = pow(e, logx);
+    VL << "u = " << u;
+
+    ExpectJetsClose(x, u);
+  }
+
+  { // Check that pow(e, log(x)) == x.
+    J logx = log(x);
+    J e = MakeJet(kE, 0., 0.);
+    VL << "x = " << x;
+    VL << "logx = " << logx;
+
+    J u = pow(e, logx);
+    VL << "u = " << u;
+
+    ExpectJetsClose(x, u);
+  }
+
+  { // Check that pow(x,y) = exp(y*log(x)).
+    J logx = log(x);
+    J e = MakeJet(kE, 0., 0.);
+    VL << "x = " << x;
+    VL << "logx = " << logx;
+
+    J u = pow(e, y*logx);
+    J v = pow(x, y);
+    VL << "u = " << u;
+    VL << "v = " << v;
+
+    ExpectJetsClose(v, u);
+  }
+
+  { // Check that pow(0, y) == 0 for y > 1, with both arguments Jets.
+    // This tests special case handling inside pow().
+    J a = MakeJet(0, 1, 2);
+    J b = MakeJet(2, 3, 4);
+    VL << "a = " << a;
+    VL << "b = " << b;
+
+    J c = pow(a, b);
+    VL << "a^b = " << c;
+    ExpectJetsClose(c, MakeJet(0, 0, 0));
+  }
+
+  { // Check that pow(0, y) == 0 for y == 1, with both arguments Jets.
+    // This tests special case handling inside pow().
+    J a = MakeJet(0, 1, 2);
+    J b = MakeJet(1, 3, 4);
+    VL << "a = " << a;
+    VL << "b = " << b;
+
+    J c = pow(a, b);
+    VL << "a^b = " << c;
+    ExpectJetsClose(c, MakeJet(0, 1, 2));
+  }
+
+  { // Check that pow(0, y) is not finite for y < 1, with both arguments Jets.
+    for (int i = 1; i < 10; i++) {
+      J a = MakeJet(0, 1, 2);
+      J b = MakeJet(i*0.1, 3, 4);       // b = 0.1 ... 0.9
+      VL << "a = " << a;
+      VL << "b = " << b;
+
+      J c = pow(a, b);
+      VL << "a^b = " << c;
+      EXPECT_EQ(c.a, 0.0);
+      EXPECT_FALSE(IsFinite(c.v[0]));
+      EXPECT_FALSE(IsFinite(c.v[1]));
+    }
+    for (int i = -10; i < 0; i++) {
+      J a = MakeJet(0, 1, 2);
+      J b = MakeJet(i*0.1, 3, 4);       // b = -1,-0.9 ... -0.1
+      VL << "a = " << a;
+      VL << "b = " << b;
+
+      J c = pow(a, b);
+      VL << "a^b = " << c;
+      EXPECT_FALSE(IsFinite(c.a));
+      EXPECT_FALSE(IsFinite(c.v[0]));
+      EXPECT_FALSE(IsFinite(c.v[1]));
+    }
+
+    {
+      // The special case of 0^0 = 1 defined by the C standard.
+      J a = MakeJet(0, 1, 2);
+      J b = MakeJet(0, 3, 4);
+      VL << "a = " << a;
+      VL << "b = " << b;
+
+      J c = pow(a, b);
+      VL << "a^b = " << c;
+      EXPECT_EQ(c.a, 1.0);
+      EXPECT_FALSE(IsFinite(c.v[0]));
+      EXPECT_FALSE(IsFinite(c.v[1]));
+    }
+  }
+
+  { // Check that pow(<0, b) is correct for integer b.
+    // This tests special case handling inside pow().
+    J a = MakeJet(-1.5, 3, 4);
+
+    // b integer:
+    for (int i = -10; i <= 10; i++) {
+      J b = MakeJet(i, 0, 5);
+      VL << "a = " << a;
+      VL << "b = " << b;
+
+      J c = pow(a, b);
+      VL << "a^b = " << c;
+      ExpectClose(c.a, pow(-1.5, i), kTolerance);
+      EXPECT_TRUE(IsFinite(c.v[0]));
+      EXPECT_FALSE(IsFinite(c.v[1]));
+      ExpectClose(c.v[0], i * pow(-1.5, i - 1) * 3.0, kTolerance);
+    }
+  }
+
+  { // Check that pow(<0, b) is correct for noninteger b.
+    // This tests special case handling inside pow().
+    J a = MakeJet(-1.5, 3, 4);
+    J b = MakeJet(-2.5, 0, 5);
+    VL << "a = " << a;
+    VL << "b = " << b;
+
+    J c = pow(a, b);
+    VL << "a^b = " << c;
+    EXPECT_FALSE(IsFinite(c.a));
+    EXPECT_FALSE(IsFinite(c.v[0]));
+    EXPECT_FALSE(IsFinite(c.v[1]));
+  }
+
+  {
+    // Check that pow(0,y) == 0 for y == 2, with the second argument a
+    // Jet.  This tests special case handling inside pow().
+    double a = 0;
+    J b = MakeJet(2, 3, 4);
+    VL << "a = " << a;
+    VL << "b = " << b;
+
+    J c = pow(a, b);
+    VL << "a^b = " << c;
+    ExpectJetsClose(c, MakeJet(0, 0, 0));
+  }
+
+  {
+    // Check that pow(<0,y) is correct for integer y. This tests special case
+    // handling inside pow().
+    double a = -1.5;
+    for (int i = -10; i <= 10; i++) {
+      J b = MakeJet(i, 3, 0);
+      VL << "a = " << a;
+      VL << "b = " << b;
+
+      J c = pow(a, b);
+      VL << "a^b = " << c;
+      ExpectClose(c.a, pow(-1.5, i), kTolerance);
+      EXPECT_FALSE(IsFinite(c.v[0]));
+      EXPECT_TRUE(IsFinite(c.v[1]));
+      ExpectClose(c.v[1], 0, kTolerance);
+    }
+  }
+
+  {
+    // Check that pow(<0,y) is correct for noninteger y. This tests special
+    // case handling inside pow().
+    double a = -1.5;
+    J b = MakeJet(-3.14, 3, 0);
+    VL << "a = " << a;
+    VL << "b = " << b;
+
+    J c = pow(a, b);
+    VL << "a^b = " << c;
+    EXPECT_FALSE(IsFinite(c.a));
+    EXPECT_FALSE(IsFinite(c.v[0]));
+    EXPECT_FALSE(IsFinite(c.v[1]));
+  }
+
+  { // Check that 1 + x == x + 1.
+    J a = x + 1.0;
+    J b = 1.0 + x;
+    J c = x;
+    c += 1.0;
+
+    ExpectJetsClose(a, b);
+    ExpectJetsClose(a, c);
+  }
+
+  { // Check that 1 - x == -(x - 1).
+    J a = 1.0 - x;
+    J b = -(x - 1.0);
+    J c = x;
+    c -= 1.0;
+
+    ExpectJetsClose(a, b);
+    ExpectJetsClose(a, -c);
+  }
+
+  { // Check that (x/s)*s == (x*s)/s.
+    J a = x / 5.0;
+    J b = x * 5.0;
+    J c = x;
+    c /= 5.0;
+    J d = x;
+    d *= 5.0;
+
+    ExpectJetsClose(5.0 * a, b / 5.0);
+    ExpectJetsClose(a, c);
+    ExpectJetsClose(b, d);
+  }
+
+  { // Check that x / y == 1 / (y / x).
+    J a = x / y;
+    J b = 1.0 / (y / x);
+    VL << "a = " << a;
+    VL << "b = " << b;
+
+    ExpectJetsClose(a, b);
+  }
+
+  { // Check that abs(-x * x) == sqrt(x * x).
+    ExpectJetsClose(abs(-x), sqrt(x * x));
+  }
+
+  { // Check that cos(acos(x)) == x.
+    J a = MakeJet(0.1, -2.7, 1e-3);
+    ExpectJetsClose(cos(acos(a)), a);
+    ExpectJetsClose(acos(cos(a)), a);
+
+    J b = MakeJet(0.6,  0.5, 1e+2);
+    ExpectJetsClose(cos(acos(b)), b);
+    ExpectJetsClose(acos(cos(b)), b);
+  }
+
+  { // Check that sin(asin(x)) == x.
+    J a = MakeJet(0.1, -2.7, 1e-3);
+    ExpectJetsClose(sin(asin(a)), a);
+    ExpectJetsClose(asin(sin(a)), a);
+
+    J b = MakeJet(0.4,  0.5, 1e+2);
+    ExpectJetsClose(sin(asin(b)), b);
+    ExpectJetsClose(asin(sin(b)), b);
+  }
+
+  {
+    J zero = J(0.0);
+
+    // Check that J0(0) == 1.
+    ExpectJetsClose(BesselJ0(zero), J(1.0));
+
+    // Check that J1(0) == 0.
+    ExpectJetsClose(BesselJ1(zero), zero);
+
+    // Check that J2(0) == 0.
+    ExpectJetsClose(BesselJn(2, zero), zero);
+
+    // Check that J3(0) == 0.
+    ExpectJetsClose(BesselJn(3, zero), zero);
+
+    J z = MakeJet(0.1, -2.7, 1e-3);
+
+    // Check that J0(z) == Jn(0,z).
+    ExpectJetsClose(BesselJ0(z), BesselJn(0, z));
+
+    // Check that J1(z) == Jn(1,z).
+    ExpectJetsClose(BesselJ1(z), BesselJn(1, z));
+
+    // Check that J0(z)+J2(z) == (2/z)*J1(z).
+    // See formula http://dlmf.nist.gov/10.6.E1
+    ExpectJetsClose(BesselJ0(z) + BesselJn(2, z), (2.0 / z) * BesselJ1(z));
+  }
+
+  { // Check that floor of a positive number works.
+    J a = MakeJet(0.1, -2.7, 1e-3);
+    J b = floor(a);
+    J expected = MakeJet(floor(a.a), 0.0, 0.0);
+    EXPECT_EQ(expected, b);
+  }
+
+  { // Check that floor of a negative number works.
+    J a = MakeJet(-1.1, -2.7, 1e-3);
+    J b = floor(a);
+    J expected = MakeJet(floor(a.a), 0.0, 0.0);
+    EXPECT_EQ(expected, b);
+  }
+
+  { // Check that floor of a positive number works.
+    J a = MakeJet(10.123, -2.7, 1e-3);
+    J b = floor(a);
+    J expected = MakeJet(floor(a.a), 0.0, 0.0);
+    EXPECT_EQ(expected, b);
+  }
+
+  { // Check that ceil of a positive number works.
+    J a = MakeJet(0.1, -2.7, 1e-3);
+    J b = ceil(a);
+    J expected = MakeJet(ceil(a.a), 0.0, 0.0);
+    EXPECT_EQ(expected, b);
+  }
+
+  { // Check that ceil of a negative number works.
+    J a = MakeJet(-1.1, -2.7, 1e-3);
+    J b = ceil(a);
+    J expected = MakeJet(ceil(a.a), 0.0, 0.0);
+    EXPECT_EQ(expected, b);
+  }
+
+  { // Check that ceil of a positive number works.
+    J a = MakeJet(10.123, -2.7, 1e-3);
+    J b = ceil(a);
+    J expected = MakeJet(ceil(a.a), 0.0, 0.0);
+    EXPECT_EQ(expected, b);
+  }
+
+  { // Check that cbrt(x * x * x) == x.
+    J z = x * x * x;
+    J w = cbrt(z);
+    VL << "z = " << z;
+    VL << "w = " << w;
+    ExpectJetsClose(w, x);
+  }
+
+  { // Check that cbrt(y) * cbrt(y) * cbrt(y) == y.
+    J z = cbrt(y);
+    J w = z * z * z;
+    VL << "z = " << z;
+    VL << "w = " << w;
+    ExpectJetsClose(w, y);
+  }
+
+  { // Check that cbrt(x) == pow(x, 1/3).
+    J z = cbrt(x);
+    J w = pow(x, 1.0 / 3.0);
+    VL << "z = " << z;
+    VL << "w = " << w;
+    ExpectJetsClose(z, w);
+  }
+  NumericalTest("cbrt", cbrt<double, 2>, -1.0);
+  NumericalTest("cbrt", cbrt<double, 2>, -1e-5);
+  NumericalTest("cbrt", cbrt<double, 2>, 1e-5);
+  NumericalTest("cbrt", cbrt<double, 2>, 1.0);
+
+  { // Check that exp2(x) == exp(x * log(2))
+    J z = exp2(x);
+    J w = exp(x * log(2.0));
+    VL << "z = " << z;
+    VL << "w = " << w;
+    ExpectJetsClose(z, w);
+  }
+  NumericalTest("exp2", exp2<double, 2>, -1.0);
+  NumericalTest("exp2", exp2<double, 2>, -1e-5);
+  NumericalTest("exp2", exp2<double, 2>, -1e-200);
+  NumericalTest("exp2", exp2<double, 2>, 0.0);
+  NumericalTest("exp2", exp2<double, 2>, 1e-200);
+  NumericalTest("exp2", exp2<double, 2>, 1e-5);
+  NumericalTest("exp2", exp2<double, 2>, 1.0);
+
+  { // Check that log2(x) == log(x) / log(2)
+    J z = log2(x);
+    J w = log(x) / log(2.0);
+    VL << "z = " << z;
+    VL << "w = " << w;
+    ExpectJetsClose(z, w);
+  }
+  NumericalTest("log2", log2<double, 2>, 1e-5);
+  NumericalTest("log2", log2<double, 2>, 1.0);
+  NumericalTest("log2", log2<double, 2>, 100.0);
+
+  { // Check that hypot(x, y) == sqrt(x^2 + y^2)
+    J h = hypot(x, y);
+    J s = sqrt(x*x + y*y);
+    VL << "h = " << h;
+    VL << "s = " << s;
+    ExpectJetsClose(h, s);
+  }
+
+  { // Check that hypot(x, x) == sqrt(2) * abs(x)
+    J h = hypot(x, x);
+    J s = sqrt(2.0) * abs(x);
+    VL << "h = " << h;
+    VL << "s = " << s;
+    ExpectJetsClose(h, s);
+  }
+
+  { // Check that the derivative is zero tangentially to the circle:
+    J h = hypot(MakeJet(2.0, 1.0, 1.0), MakeJet(2.0, 1.0, -1.0));
+    VL << "h = " << h;
+    ExpectJetsClose(h, MakeJet(sqrt(8.0), std::sqrt(2.0), 0.0));
+  }
+
+  { // Check that hypot(x, 0) == x
+    J zero = MakeJet(0.0, 2.0, 3.14);
+    J h = hypot(x, zero);
+    VL << "h = " << h;
+    ExpectJetsClose(x, h);
+  }
+
+  { // Check that hypot(0, y) == y
+    J zero = MakeJet(0.0, 2.0, 3.14);
+    J h = hypot(zero, y);
+    VL << "h = " << h;
+    ExpectJetsClose(y, h);
+  }
+
+  { // Check that hypot(x, 0) == sqrt(x * x) == x, even when x * x underflows:
+    EXPECT_EQ(DBL_MIN * DBL_MIN, 0.0);  // Make sure it underflows.
+    J tiny = MakeJet(DBL_MIN, 2.0, 3.14);
+    J h = hypot(tiny, J(0.0));
+    VL << "h = " << h;
+    ExpectJetsClose(h, tiny);
+  }
+
+  { // Check that hypot(x, 0) == sqrt(x * x) == x, even when x * x overflows:
+    EXPECT_EQ(DBL_MAX * DBL_MAX, std::numeric_limits<double>::infinity());
+    J huge = MakeJet(DBL_MAX, 2.0, 3.14);
+    J h = hypot(huge, J(0.0));
+    VL << "h = " << h;
+    ExpectJetsClose(h, huge);
+  }
+
+  NumericalTest2("hypot", hypot<double, 2>,  0.0,   1e-5);
+  NumericalTest2("hypot", hypot<double, 2>, -1e-5,  0.0);
+  NumericalTest2("hypot", hypot<double, 2>,  1e-5,  1e-5);
+  NumericalTest2("hypot", hypot<double, 2>,  0.0,   1.0);
+  NumericalTest2("hypot", hypot<double, 2>,  1e-3,  1.0);
+  NumericalTest2("hypot", hypot<double, 2>,  1e-3, -1.0);
+  NumericalTest2("hypot", hypot<double, 2>, -1e-3,  1.0);
+  NumericalTest2("hypot", hypot<double, 2>, -1e-3, -1.0);
+  NumericalTest2("hypot", hypot<double, 2>,  1.0,   2.0);
+
+  {
+    J z = fmax(x, y);
+    VL << "z = " << z;
+    ExpectJetsClose(x, z);
+  }
+
+  {
+    J z = fmin(x, y);
+    VL << "z = " << z;
+    ExpectJetsClose(y, z);
+  }
+
+}
+
+TEST(Jet, JetsInEigenMatrices) {
+  J x = MakeJet(2.3, -2.7, 1e-3);
+  J y = MakeJet(1.7,  0.5, 1e+2);
+  J z = MakeJet(5.3, -4.7, 1e-3);
+  J w = MakeJet(9.7,  1.5, 10.1);
+
+  Eigen::Matrix<J, 2, 2> M;
+  Eigen::Matrix<J, 2, 1> v, r1, r2;
+
+  M << x, y, z, w;
+  v << x, z;
+
+  // Check that M * v == (v^T * M^T)^T
+  r1 = M * v;
+  r2 = (v.transpose() * M.transpose()).transpose();
+
+  ExpectJetsClose(r1(0), r2(0));
+  ExpectJetsClose(r1(1), r2(1));
+}
+
+TEST(JetTraitsTest, ClassificationMixed) {
+  Jet<double, 3> a(5.5, 0);
+  a.v[0] = std::numeric_limits<double>::quiet_NaN();
+  a.v[1] = std::numeric_limits<double>::infinity();
+  a.v[2] = -std::numeric_limits<double>::infinity();
+  EXPECT_FALSE(IsFinite(a));
+  EXPECT_FALSE(IsNormal(a));
+  EXPECT_TRUE(IsInfinite(a));
+  EXPECT_TRUE(IsNaN(a));
+}
+
+TEST(JetTraitsTest, ClassificationNaN) {
+  Jet<double, 3> a(5.5, 0);
+  a.v[0] = std::numeric_limits<double>::quiet_NaN();
+  a.v[1] = 0.0;
+  a.v[2] = 0.0;
+  EXPECT_FALSE(IsFinite(a));
+  EXPECT_FALSE(IsNormal(a));
+  EXPECT_FALSE(IsInfinite(a));
+  EXPECT_TRUE(IsNaN(a));
+}
+
+TEST(JetTraitsTest, ClassificationInf) {
+  Jet<double, 3> a(5.5, 0);
+  a.v[0] = std::numeric_limits<double>::infinity();
+  a.v[1] = 0.0;
+  a.v[2] = 0.0;
+  EXPECT_FALSE(IsFinite(a));
+  EXPECT_FALSE(IsNormal(a));
+  EXPECT_TRUE(IsInfinite(a));
+  EXPECT_FALSE(IsNaN(a));
+}
+
+TEST(JetTraitsTest, ClassificationFinite) {
+  Jet<double, 3> a(5.5, 0);
+  a.v[0] = 100.0;
+  a.v[1] = 1.0;
+  a.v[2] = 3.14159;
+  EXPECT_TRUE(IsFinite(a));
+  EXPECT_TRUE(IsNormal(a));
+  EXPECT_FALSE(IsInfinite(a));
+  EXPECT_FALSE(IsNaN(a));
+}
+
+#if EIGEN_VERSION_AT_LEAST(3, 3, 0)
+
+// The following test ensures that Jets have all the appropriate Eigen
+// related traits so that they can be used as part of matrix
+// decompositions.
+TEST(Jet, FullRankEigenLLTSolve) {
+  Eigen::Matrix<J, 3, 3> A;
+  Eigen::Matrix<J, 3, 1> b, x;
+  for (int i = 0; i < 3; ++i) {
+    for (int j = 0; j < 3; ++j) {
+      A(i,j) = MakeJet(0.0, i, j * j);
+    }
+    b(i) = MakeJet(i, i, i);
+    x(i) = MakeJet(0.0, 0.0, 0.0);
+    A(i,i) = MakeJet(1.0, i, i * i);
+  }
+  x = A.llt().solve(b);
+  for (int i = 0; i < 3; ++i) {
+    EXPECT_EQ(x(i).a, b(i).a);
+  }
+}
+
+TEST(Jet, FullRankEigenLDLTSolve) {
+  Eigen::Matrix<J, 3, 3> A;
+  Eigen::Matrix<J, 3, 1> b, x;
+  for (int i = 0; i < 3; ++i) {
+    for (int j = 0; j < 3; ++j) {
+      A(i,j) = MakeJet(0.0, i, j * j);
+    }
+    b(i) = MakeJet(i, i, i);
+    x(i) = MakeJet(0.0, 0.0, 0.0);
+    A(i,i) = MakeJet(1.0, i, i * i);
+  }
+  x = A.ldlt().solve(b);
+  for (int i = 0; i < 3; ++i) {
+    EXPECT_EQ(x(i).a, b(i).a);
+  }
+}
+
+TEST(Jet, FullRankEigenLUSolve) {
+  Eigen::Matrix<J, 3, 3> A;
+  Eigen::Matrix<J, 3, 1> b, x;
+  for (int i = 0; i < 3; ++i) {
+    for (int j = 0; j < 3; ++j) {
+      A(i,j) = MakeJet(0.0, i, j * j);
+    }
+    b(i) = MakeJet(i, i, i);
+    x(i) = MakeJet(0.0, 0.0, 0.0);
+    A(i,i) = MakeJet(1.0, i, i * i);
+  }
+
+  x = A.lu().solve(b);
+  for (int i = 0; i < 3; ++i) {
+    EXPECT_EQ(x(i).a, b(i).a);
+  }
+}
+
+// ScalarBinaryOpTraits is only supported on Eigen versions >= 3.3
+TEST(JetTraitsTest, MatrixScalarUnaryOps) {
+  const J x = MakeJet(2.3, -2.7, 1e-3);
+  const J y = MakeJet(1.7,  0.5, 1e+2);
+  Eigen::Matrix<J, 2, 1> a;
+  a << x, y;
+
+  const J sum = a.sum();
+  const J sum2 = a(0) + a(1);
+  ExpectJetsClose(sum, sum2);
+}
+
+TEST(JetTraitsTest, MatrixScalarBinaryOps) {
+  const J x = MakeJet(2.3, -2.7, 1e-3);
+  const J y = MakeJet(1.7,  0.5, 1e+2);
+  const J z = MakeJet(5.3, -4.7, 1e-3);
+  const J w = MakeJet(9.7,  1.5, 10.1);
+
+  Eigen::Matrix<J, 2, 2> M;
+  Eigen::Vector2d v;
+
+  M << x, y, z, w;
+  v << 0.6, -2.1;
+
+  // Check that M * v == M * v.cast<J>().
+  const Eigen::Matrix<J, 2, 1> r1 = M * v;
+  const Eigen::Matrix<J, 2, 1> r2 = M * v.cast<J>();
+
+  ExpectJetsClose(r1(0), r2(0));
+  ExpectJetsClose(r1(1), r2(1));
+
+  // Check that M * a == M * T(a).
+  const double a = 3.1;
+  const Eigen::Matrix<J, 2, 2> r3 = M * a;
+  const Eigen::Matrix<J, 2, 2> r4 = M * J(a);
+
+  ExpectJetsClose(r3(0, 0), r4(0, 0));
+  ExpectJetsClose(r3(1, 0), r4(1, 0));
+  ExpectJetsClose(r3(0, 1), r4(0, 1));
+  ExpectJetsClose(r3(1, 1), r4(1, 1));
+}
+
+TEST(JetTraitsTest, ArrayScalarUnaryOps) {
+  const J x = MakeJet(2.3, -2.7, 1e-3);
+  const J y = MakeJet(1.7,  0.5, 1e+2);
+  Eigen::Array<J, 2, 1> a;
+  a << x, y;
+
+  const J sum = a.sum();
+  const J sum2 = a(0) + a(1);
+  ExpectJetsClose(sum, sum2);
+}
+
+TEST(JetTraitsTest, ArrayScalarBinaryOps) {
+  const J x = MakeJet(2.3, -2.7, 1e-3);
+  const J y = MakeJet(1.7,  0.5, 1e+2);
+
+  Eigen::Array<J, 2, 1> a;
+  Eigen::Array2d b;
+
+  a << x, y;
+  b << 0.6, -2.1;
+
+  // Check that a * b == a * b.cast<T>()
+  const Eigen::Array<J, 2, 1> r1 = a * b;
+  const Eigen::Array<J, 2, 1> r2 = a * b.cast<J>();
+
+  ExpectJetsClose(r1(0), r2(0));
+  ExpectJetsClose(r1(1), r2(1));
+
+  // Check that a * c == a * T(c).
+  const double c = 3.1;
+  const Eigen::Array<J, 2, 1> r3 = a * c;
+  const Eigen::Array<J, 2, 1> r4 = a * J(c);
+
+  ExpectJetsClose(r3(0), r4(0));
+  ExpectJetsClose(r3(1), r4(1));
+}
+#endif   // EIGEN_VERSION_AT_LEAST(3, 3, 0)
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/lapack.cc b/internal/ceres/lapack.cc
new file mode 100644
index 0000000..6fc23f4
--- /dev/null
+++ b/internal/ceres/lapack.cc
@@ -0,0 +1,193 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/lapack.h"
+
+#include "ceres/internal/port.h"
+#include "ceres/linear_solver.h"
+#include "glog/logging.h"
+
+// C interface to the LAPACK Cholesky factorization, triangular solve, and
+// linear least squares (QR) routines.
+extern "C" void dpotrf_(char* uplo,
+                       int* n,
+                       double* a,
+                       int* lda,
+                       int* info);
+
+extern "C" void dpotrs_(char* uplo,
+                        int* n,
+                        int* nrhs,
+                        double* a,
+                        int* lda,
+                        double* b,
+                        int* ldb,
+                        int* info);
+
+extern "C" void dgels_(char* uplo,
+                       int* m,
+                       int* n,
+                       int* nrhs,
+                       double* a,
+                       int* lda,
+                       double* b,
+                       int* ldb,
+                       double* work,
+                       int* lwork,
+                       int* info);
+
+
+namespace ceres {
+namespace internal {
+
+LinearSolverTerminationType LAPACK::SolveInPlaceUsingCholesky(
+    int num_rows,
+    const double* in_lhs,
+    double* rhs_and_solution,
+    std::string* message) {
+#ifdef CERES_NO_LAPACK
+  LOG(FATAL) << "Ceres was built without a BLAS library.";
+  return LINEAR_SOLVER_FATAL_ERROR;
+#else
+  char uplo = 'L';
+  int n = num_rows;
+  int info = 0;
+  int nrhs = 1;
+  double* lhs = const_cast<double*>(in_lhs);
+
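+  // dpotrf_ overwrites the lower triangle of lhs (uplo = 'L') with its
+  // Cholesky factor; dpotrs_ below then performs the two triangular solves,
+  // leaving the solution in rhs_and_solution.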
+  dpotrf_(&uplo, &n, lhs, &n, &info);
+  if (info < 0) {
+    LOG(FATAL) << "Congratulations, you found a bug in Ceres."
+               << "Please report it."
+               << "LAPACK::dpotrf fatal error."
+               << "Argument: " << -info << " is invalid.";
+    return LINEAR_SOLVER_FATAL_ERROR;
+  }
+
+  if (info > 0) {
+    *message = StringPrintf(
+        "LAPACK::dpotrf numerical failure. "
+        "The leading minor of order %d is not positive definite.",
+        info);
+    return LINEAR_SOLVER_FAILURE;
+  }
+
+  dpotrs_(&uplo, &n, &nrhs, lhs, &n, rhs_and_solution, &n, &info);
+  if (info < 0) {
+    LOG(FATAL) << "Congratulations, you found a bug in Ceres."
+               << "Please report it."
+               << "LAPACK::dpotrs fatal error."
+               << "Argument: " << -info << " is invalid.";
+    return LINEAR_SOLVER_FATAL_ERROR;
+  }
+
+  *message = "Success";
+  return LINEAR_SOLVER_SUCCESS;
+#endif
+}
+
+int LAPACK::EstimateWorkSizeForQR(int num_rows, int num_cols) {
+#ifdef CERES_NO_LAPACK
+  LOG(FATAL) << "Ceres was built without a LAPACK library.";
+  return -1;
+#else
+  char trans = 'N';
+  int nrhs = 1;
+  int lwork = -1;
+  double work;
+  int info = 0;
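+  // Calling dgels_ with lwork = -1 performs a workspace query: no system is
+  // solved, and the optimal workspace size is written into work.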
+  dgels_(&trans,
+         &num_rows,
+         &num_cols,
+         &nrhs,
+         NULL,
+         &num_rows,
+         NULL,
+         &num_rows,
+         &work,
+         &lwork,
+         &info);
+
+  if (info < 0) {
+    LOG(FATAL) << "Congratulations, you found a bug in Ceres."
+               << "Please report it."
+               << "LAPACK::dgels fatal error."
+               << "Argument: " << -info << " is invalid.";
+  }
+  return static_cast<int>(work);
+#endif
+}
+
+LinearSolverTerminationType LAPACK::SolveInPlaceUsingQR(
+    int num_rows,
+    int num_cols,
+    const double* in_lhs,
+    int work_size,
+    double* work,
+    double* rhs_and_solution,
+    std::string* message) {
+#ifdef CERES_NO_LAPACK
+  LOG(FATAL) << "Ceres was built without a LAPACK library.";
+  return LINEAR_SOLVER_FATAL_ERROR;
+#else
+  char trans = 'N';
+  int m = num_rows;
+  int n = num_cols;
+  int nrhs = 1;
+  int lda = num_rows;
+  int ldb = num_rows;
+  int info = 0;
+  double* lhs = const_cast<double*>(in_lhs);
+
+  dgels_(&trans,
+         &m,
+         &n,
+         &nrhs,
+         lhs,
+         &lda,
+         rhs_and_solution,
+         &ldb,
+         work,
+         &work_size,
+         &info);
+
+  if (info < 0) {
+    LOG(FATAL) << "Congratulations, you found a bug in Ceres."
+               << "Please report it."
+               << "LAPACK::dgels fatal error."
+               << "Argument: " << -info << " is invalid.";
+  }
+
+  *message = "Success.";
+  return LINEAR_SOLVER_SUCCESS;
+#endif
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/lapack.h b/internal/ceres/lapack.h
new file mode 100644
index 0000000..5bb1a22
--- /dev/null
+++ b/internal/ceres/lapack.h
@@ -0,0 +1,100 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_LAPACK_H_
+#define CERES_INTERNAL_LAPACK_H_
+
+#include <string>
+#include "ceres/internal/port.h"
+#include "ceres/linear_solver.h"
+
+namespace ceres {
+namespace internal {
+
+class LAPACK {
+ public:
+  // Solve
+  //
+  //  lhs * solution = rhs
+  //
+  // using a Cholesky factorization. Here
+  // lhs is a symmetric positive definite matrix. It is assumed to be
+  // column major and only the lower triangular part of the matrix is
+  // referenced.
+  //
+  // This function uses the LAPACK dpotrf and dpotrs routines.
+  //
+  // The return value and the message string together describe whether
+  // the solver terminated successfully or not and if so, what was the
+  // reason for failure.
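+  //
+  // A minimal usage sketch (illustrative only; the Eigen types and the
+  // 2x2 system below are assumptions for the example, not part of this
+  // interface):
+  //
+  //   Eigen::MatrixXd lhs(2, 2);
+  //   lhs << 4.0, 1.0,
+  //          1.0, 3.0;        // Symmetric positive definite, column major.
+  //   Eigen::VectorXd rhs(2);
+  //   rhs << 1.0, 2.0;        // Overwritten with the solution.
+  //   std::string message;
+  //   LinearSolverTerminationType status =
+  //       LAPACK::SolveInPlaceUsingCholesky(
+  //           2, lhs.data(), rhs.data(), &message);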
+  static LinearSolverTerminationType SolveInPlaceUsingCholesky(
+      int num_rows,
+      const double* lhs,
+      double* rhs_and_solution,
+      std::string* message);
+
+  // SolveInPlaceUsingQR requires a buffer for its temporary
+  // computation. Given the dimensions of the lhs matrix, this function
+  // returns the size of the buffer needed.
+  static int EstimateWorkSizeForQR(int num_rows, int num_cols);
+
+  // Solve
+  //
+  //  lhs * solution = rhs
+  //
+  // using a dense QR factorization. lhs is an arbitrary (possibly
+  // rectangular) matrix with full column rank.
+  //
+  // work is an array of size work_size that this routine uses for its
+  // temporary storage. The optimal size of this array can be obtained
+  // by calling EstimateWorkSizeForQR.
+  //
+  // On entry, rhs_and_solution contains the rhs, and upon return
+  // the first num_cols entries contain the solution.
+  //
+  // This function uses the LAPACK dgels routine.
+  //
+  // The return value and the message string together describe whether
+  // the solver terminated successfully or not and if so, what was the
+  // reason for failure.
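+  //
+  // A minimal usage sketch (illustrative only; the Eigen types and the
+  // 3x2 system below are assumptions for the example):
+  //
+  //   Eigen::MatrixXd lhs(3, 2);   // Column major, full column rank.
+  //   lhs << 1.0, 0.0,
+  //          0.0, 1.0,
+  //          1.0, 1.0;
+  //   Eigen::VectorXd rhs_and_solution(3);
+  //   rhs_and_solution << 1.0, 2.0, 3.0;
+  //   const int work_size = LAPACK::EstimateWorkSizeForQR(3, 2);
+  //   std::vector<double> work(work_size);
+  //   std::string message;
+  //   LinearSolverTerminationType status =
+  //       LAPACK::SolveInPlaceUsingQR(3, 2, lhs.data(), work_size, work.data(),
+  //                                   rhs_and_solution.data(), &message);
+  //   // On success the first 2 entries of rhs_and_solution hold the solution.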
+  static LinearSolverTerminationType SolveInPlaceUsingQR(
+      int num_rows,
+      int num_cols,
+      const double* lhs,
+      int work_size,
+      double* work,
+      double* rhs_and_solution,
+      std::string* message);
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_LAPACK_H_
diff --git a/internal/ceres/levenberg_marquardt_strategy.cc b/internal/ceres/levenberg_marquardt_strategy.cc
new file mode 100644
index 0000000..9eec631
--- /dev/null
+++ b/internal/ceres/levenberg_marquardt_strategy.cc
@@ -0,0 +1,169 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/levenberg_marquardt_strategy.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include "Eigen/Core"
+#include "ceres/array_utils.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/linear_least_squares_problems.h"
+#include "ceres/linear_solver.h"
+#include "ceres/sparse_matrix.h"
+#include "ceres/trust_region_strategy.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+LevenbergMarquardtStrategy::LevenbergMarquardtStrategy(
+    const TrustRegionStrategy::Options& options)
+    : linear_solver_(options.linear_solver),
+      radius_(options.initial_radius),
+      max_radius_(options.max_radius),
+      min_diagonal_(options.min_lm_diagonal),
+      max_diagonal_(options.max_lm_diagonal),
+      decrease_factor_(2.0),
+      reuse_diagonal_(false) {
+  CHECK(linear_solver_ != nullptr);
+  CHECK_GT(min_diagonal_, 0.0);
+  CHECK_LE(min_diagonal_, max_diagonal_);
+  CHECK_GT(max_radius_, 0.0);
+}
+
+LevenbergMarquardtStrategy::~LevenbergMarquardtStrategy() {
+}
+
+TrustRegionStrategy::Summary LevenbergMarquardtStrategy::ComputeStep(
+    const TrustRegionStrategy::PerSolveOptions& per_solve_options,
+    SparseMatrix* jacobian,
+    const double* residuals,
+    double* step) {
+  CHECK(jacobian != nullptr);
+  CHECK(residuals != nullptr);
+  CHECK(step != nullptr);
+
+  const int num_parameters = jacobian->num_cols();
+  if (!reuse_diagonal_) {
+    if (diagonal_.rows() != num_parameters) {
+      diagonal_.resize(num_parameters, 1);
+    }
+
+    jacobian->SquaredColumnNorm(diagonal_.data());
+    for (int i = 0; i < num_parameters; ++i) {
+      diagonal_[i] = std::min(std::max(diagonal_[i], min_diagonal_),
+                              max_diagonal_);
+    }
+  }
+
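+  // lm_diagonal_ is handed to the linear solver below as the per-solve
+  // regularizer D = sqrt(diag(J'J) / radius), so the system solved is
+  // effectively (J'J + diag(J'J) / radius) y = J'r, the classic
+  // Levenberg-Marquardt damping.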
+  lm_diagonal_ = (diagonal_ / radius_).array().sqrt();
+
+  LinearSolver::PerSolveOptions solve_options;
+  solve_options.D = lm_diagonal_.data();
+  solve_options.q_tolerance = per_solve_options.eta;
+  // Disable r_tolerance checking, since we only care about
+  // termination via the q_tolerance. As Nash and Sofer show,
+  // r_tolerance based termination is essentially useless in
+  // Truncated Newton methods.
+  solve_options.r_tolerance = -1.0;
+
+  // Invalidate the output array step, so that we can detect if
+  // the linear solver generated numerical garbage.  This is known
+  // to happen for the DENSE_QR and DENSE_SCHUR solvers when the
+  // Jacobian is severely rank deficient and mu is too small.
+  InvalidateArray(num_parameters, step);
+
+  // Instead of solving Jx = -r, solve Jy = r.
+  // Then x can be found as x = -y, but the inputs jacobian and residuals
+  // do not need to be modified.
+  LinearSolver::Summary linear_solver_summary =
+      linear_solver_->Solve(jacobian, residuals, solve_options, step);
+
+  if (linear_solver_summary.termination_type == LINEAR_SOLVER_FATAL_ERROR) {
+    LOG(WARNING) << "Linear solver fatal error: "
+                 << linear_solver_summary.message;
+  } else if (linear_solver_summary.termination_type == LINEAR_SOLVER_FAILURE)  {
+    LOG(WARNING) << "Linear solver failure. Failed to compute a step: "
+                 << linear_solver_summary.message;
+  } else if (!IsArrayValid(num_parameters, step)) {
+    LOG(WARNING) << "Linear solver failure. Failed to compute a finite step.";
+    linear_solver_summary.termination_type = LINEAR_SOLVER_FAILURE;
+  } else {
+    VectorRef(step, num_parameters) *= -1.0;
+  }
+  reuse_diagonal_ = true;
+
+  if (per_solve_options.dump_format_type == CONSOLE ||
+      (per_solve_options.dump_format_type != CONSOLE &&
+       !per_solve_options.dump_filename_base.empty())) {
+    if (!DumpLinearLeastSquaresProblem(per_solve_options.dump_filename_base,
+                                       per_solve_options.dump_format_type,
+                                       jacobian,
+                                       solve_options.D,
+                                       residuals,
+                                       step,
+                                       0)) {
+      LOG(ERROR) << "Unable to dump trust region problem."
+                 << " Filename base: " << per_solve_options.dump_filename_base;
+    }
+  }
+
+
+  TrustRegionStrategy::Summary summary;
+  summary.residual_norm = linear_solver_summary.residual_norm;
+  summary.num_iterations = linear_solver_summary.num_iterations;
+  summary.termination_type = linear_solver_summary.termination_type;
+  return summary;
+}
+
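+// On acceptance, the trust region radius is grown following Madsen,
+// Nielsen & Tingleff:
+//
+//   radius <- radius / max(1/3, 1 - (2 * step_quality - 1)^3),
+//
+// clamped to max_radius, and the rejection decrease factor is reset to 2.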
+void LevenbergMarquardtStrategy::StepAccepted(double step_quality) {
+  CHECK_GT(step_quality, 0.0);
+  radius_ = radius_ / std::max(1.0 / 3.0,
+                               1.0 - pow(2.0 * step_quality - 1.0, 3));
+  radius_ = std::min(max_radius_, radius_);
+  decrease_factor_ = 2.0;
+  reuse_diagonal_ = false;
+}
+
+void LevenbergMarquardtStrategy::StepRejected(double step_quality) {
+  radius_ = radius_ / decrease_factor_;
+  decrease_factor_ *= 2.0;
+  reuse_diagonal_ = true;
+}
+
+double LevenbergMarquardtStrategy::Radius() const {
+  return radius_;
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/levenberg_marquardt_strategy.h b/internal/ceres/levenberg_marquardt_strategy.h
new file mode 100644
index 0000000..c87a016
--- /dev/null
+++ b/internal/ceres/levenberg_marquardt_strategy.h
@@ -0,0 +1,87 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_LEVENBERG_MARQUARDT_STRATEGY_H_
+#define CERES_INTERNAL_LEVENBERG_MARQUARDT_STRATEGY_H_
+
+#include "ceres/internal/eigen.h"
+#include "ceres/trust_region_strategy.h"
+
+namespace ceres {
+namespace internal {
+
+// Levenberg-Marquardt step computation and trust region sizing
+// strategy based on "Methods for Nonlinear Least Squares" by
+// K. Madsen, H.B. Nielsen and O. Tingleff. Available to download from
+//
+// http://www2.imm.dtu.dk/pubdb/views/edoc_download.php/3215/pdf/imm3215.pdf
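+//
+// A minimal usage sketch (illustrative only; in Ceres the trust region
+// minimizer drives this interface, and the variable names below are
+// assumptions for the example):
+//
+//   TrustRegionStrategy::Options options;
+//   options.linear_solver = linear_solver;   // Some LinearSolver*.
+//   LevenbergMarquardtStrategy strategy(options);
+//   TrustRegionStrategy::PerSolveOptions pso;
+//   TrustRegionStrategy::Summary summary =
+//       strategy.ComputeStep(pso, jacobian, residuals, step);
+//   if (step_reduced_the_cost) {
+//     strategy.StepAccepted(relative_decrease);
+//   } else {
+//     strategy.StepRejected(relative_decrease);
+//   }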
+class LevenbergMarquardtStrategy : public TrustRegionStrategy {
+ public:
+  explicit LevenbergMarquardtStrategy(
+      const TrustRegionStrategy::Options& options);
+  virtual ~LevenbergMarquardtStrategy();
+
+  // TrustRegionStrategy interface
+  virtual TrustRegionStrategy::Summary ComputeStep(
+      const TrustRegionStrategy::PerSolveOptions& per_solve_options,
+      SparseMatrix* jacobian,
+      const double* residuals,
+      double* step);
+  virtual void StepAccepted(double step_quality);
+  virtual void StepRejected(double step_quality);
+  virtual void StepIsInvalid() {
+    // Treat the current step as a rejected step with no increase in
+    // solution quality. Since rejected steps lead to a decrease in the
+    // size of the trust region, the next time ComputeStep is called,
+    // this will lead to a better conditioned system.
+    StepRejected(0.0);
+  }
+
+  virtual double Radius() const;
+
+ private:
+  LinearSolver* linear_solver_;
+  double radius_;
+  double max_radius_;
+  const double min_diagonal_;
+  const double max_diagonal_;
+  double decrease_factor_;
+  bool reuse_diagonal_;
+  Vector diagonal_;   // diagonal_ = diag(J'J)
+  // Scaled copy of diagonal_. Stored here as optimization to prevent
+  // allocations in every iteration and reuse when a step fails and
+  // ComputeStep is called again.
+  Vector lm_diagonal_;  // lm_diagonal_ = diagonal_ / radius_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_LEVENBERG_MARQUARDT_STRATEGY_H_
diff --git a/internal/ceres/levenberg_marquardt_strategy_test.cc b/internal/ceres/levenberg_marquardt_strategy_test.cc
new file mode 100644
index 0000000..cfbec71
--- /dev/null
+++ b/internal/ceres/levenberg_marquardt_strategy_test.cc
@@ -0,0 +1,169 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <memory>
+#include "ceres/internal/eigen.h"
+#include "ceres/levenberg_marquardt_strategy.h"
+#include "ceres/linear_solver.h"
+#include "ceres/trust_region_strategy.h"
+#include "glog/logging.h"
+#include "gmock/gmock.h"
+#include "gmock/mock-log.h"
+#include "gtest/gtest.h"
+
+using testing::AllOf;
+using testing::AnyNumber;
+using testing::HasSubstr;
+using testing::ScopedMockLog;
+using testing::_;
+
+namespace ceres {
+namespace internal {
+
+const double kTolerance = 1e-16;
+
+// Linear solver that takes as input a vector and checks that the
+// caller passes the same vector as LinearSolver::PerSolveOptions.D.
+class RegularizationCheckingLinearSolver : public DenseSparseMatrixSolver {
+ public:
+  RegularizationCheckingLinearSolver(const int num_cols, const double* diagonal)
+      : num_cols_(num_cols),
+        diagonal_(diagonal) {
+  }
+
+  virtual ~RegularizationCheckingLinearSolver() {}
+
+ private:
+  virtual LinearSolver::Summary SolveImpl(
+      DenseSparseMatrix* A,
+      const double* b,
+      const LinearSolver::PerSolveOptions& per_solve_options,
+      double* x) {
+    CHECK(per_solve_options.D != nullptr);
+    for (int i = 0; i < num_cols_; ++i) {
+      EXPECT_NEAR(per_solve_options.D[i], diagonal_[i], kTolerance)
+          << i << " " << per_solve_options.D[i] << " " << diagonal_[i];
+    }
+    return LinearSolver::Summary();
+  }
+
+  const int num_cols_;
+  const double* diagonal_;
+};
+
+TEST(LevenbergMarquardtStrategy, AcceptRejectStepRadiusScaling) {
+  TrustRegionStrategy::Options options;
+  options.initial_radius = 2.0;
+  options.max_radius = 20.0;
+  options.min_lm_diagonal = 1e-8;
+  options.max_lm_diagonal = 1e8;
+
+  // We need a non-null pointer here, so anything should do.
+  std::unique_ptr<LinearSolver> linear_solver(
+      new RegularizationCheckingLinearSolver(0, NULL));
+  options.linear_solver = linear_solver.get();
+
+  LevenbergMarquardtStrategy lms(options);
+  EXPECT_EQ(lms.Radius(), options.initial_radius);
+  lms.StepRejected(0.0);
+  EXPECT_EQ(lms.Radius(), 1.0);
+  lms.StepRejected(-1.0);
+  EXPECT_EQ(lms.Radius(), 0.25);
+  lms.StepAccepted(1.0);
+  EXPECT_EQ(lms.Radius(), 0.25 * 3.0);
+  lms.StepAccepted(1.0);
+  EXPECT_EQ(lms.Radius(), 0.25 * 3.0 * 3.0);
+  lms.StepAccepted(0.25);
+  EXPECT_EQ(lms.Radius(), 0.25 * 3.0 * 3.0 / 1.125);
+  lms.StepAccepted(1.0);
+  EXPECT_EQ(lms.Radius(), 0.25 * 3.0 * 3.0 / 1.125 * 3.0);
+  lms.StepAccepted(1.0);
+  EXPECT_EQ(lms.Radius(), 0.25 * 3.0 * 3.0 / 1.125 * 3.0 * 3.0);
+  lms.StepAccepted(1.0);
+  EXPECT_EQ(lms.Radius(), options.max_radius);
+}
+
+TEST(LevenbergMarquardtStrategy, CorrectDiagonalToLinearSolver) {
+  Matrix jacobian(2, 3);
+  jacobian.setZero();
+  jacobian(0, 0) = 0.0;
+  jacobian(0, 1) = 1.0;
+  jacobian(1, 1) = 1.0;
+  jacobian(0, 2) = 100.0;
+
+  double residual = 1.0;
+  double x[3];
+  DenseSparseMatrix dsm(jacobian);
+
+  TrustRegionStrategy::Options options;
+  options.initial_radius = 2.0;
+  options.max_radius = 20.0;
+  options.min_lm_diagonal = 1e-2;
+  options.max_lm_diagonal = 1e2;
+
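+  // Expected per-solve D mirrors the strategy: clamp diag(J'J) to
+  // [min_lm_diagonal, max_lm_diagonal], then take sqrt(clamped / initial_radius).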
+  double diagonal[3];
+  diagonal[0] = options.min_lm_diagonal;
+  diagonal[1] = 2.0;
+  diagonal[2] = options.max_lm_diagonal;
+  for (int i = 0; i < 3; ++i) {
+    diagonal[i] = sqrt(diagonal[i] / options.initial_radius);
+  }
+
+  RegularizationCheckingLinearSolver linear_solver(3, diagonal);
+  options.linear_solver = &linear_solver;
+
+  LevenbergMarquardtStrategy lms(options);
+  TrustRegionStrategy::PerSolveOptions pso;
+
+  {
+    ScopedMockLog log;
+    EXPECT_CALL(log, Log(_, _, _)).Times(AnyNumber());
+    // This using directive is needed to get around the fact that there
+    // are versions of glog which are not in the google namespace.
+    using namespace google;
+
+#if defined(_MSC_VER)
+    // Use GLOG_WARNING to support MSVC if GLOG_NO_ABBREVIATED_SEVERITIES
+    // is defined.
+    EXPECT_CALL(log, Log(GLOG_WARNING, _,
+                         HasSubstr("Failed to compute a step")));
+#else
+    EXPECT_CALL(log, Log(google::WARNING, _,
+                         HasSubstr("Failed to compute a step")));
+#endif
+
+    TrustRegionStrategy::Summary summary =
+        lms.ComputeStep(pso, &dsm, &residual, x);
+    EXPECT_EQ(summary.termination_type, LINEAR_SOLVER_FAILURE);
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/line_search.cc b/internal/ceres/line_search.cc
new file mode 100644
index 0000000..352c64f
--- /dev/null
+++ b/internal/ceres/line_search.cc
@@ -0,0 +1,878 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/line_search.h"
+
+#include <algorithm>
+#include <cmath>
+#include <iomanip>
+#include <iostream>  // NOLINT
+
+#include "ceres/evaluator.h"
+#include "ceres/function_sample.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/map_util.h"
+#include "ceres/polynomial.h"
+#include "ceres/stringprintf.h"
+#include "ceres/wall_time.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+using std::map;
+using std::ostream;
+using std::string;
+using std::vector;
+
+namespace {
+// Precision used for floating point values in error message output.
+const int kErrorMessageNumericPrecision = 8;
+}  // namespace
+
+ostream& operator<<(ostream &os, const FunctionSample& sample);
+
+// Convenience stream operator for pushing FunctionSamples into log messages.
+ostream& operator<<(ostream &os, const FunctionSample& sample) {
+  os << sample.ToDebugString();
+  return os;
+}
+
+LineSearch::LineSearch(const LineSearch::Options& options)
+    : options_(options) {}
+
+LineSearch* LineSearch::Create(const LineSearchType line_search_type,
+                               const LineSearch::Options& options,
+                               string* error) {
+  LineSearch* line_search = NULL;
+  switch (line_search_type) {
+  case ceres::ARMIJO:
+    line_search = new ArmijoLineSearch(options);
+    break;
+  case ceres::WOLFE:
+    line_search = new WolfeLineSearch(options);
+    break;
+  default:
+    *error = string("Invalid line search algorithm type: ") +
+        LineSearchTypeToString(line_search_type) +
+        string(", unable to create line search.");
+    return NULL;
+  }
+  return line_search;
+}
+
+LineSearchFunction::LineSearchFunction(Evaluator* evaluator)
+    : evaluator_(evaluator),
+      position_(evaluator->NumParameters()),
+      direction_(evaluator->NumEffectiveParameters()),
+      scaled_direction_(evaluator->NumEffectiveParameters()),
+      initial_evaluator_residual_time_in_seconds(0.0),
+      initial_evaluator_jacobian_time_in_seconds(0.0) {}
+
+void LineSearchFunction::Init(const Vector& position,
+                              const Vector& direction) {
+  position_ = position;
+  direction_ = direction;
+}
+
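+// Evaluates the one dimensional function underlying the line search,
+//
+//   phi(x) = f(position (+) x * direction),
+//
+// where (+) is Evaluator::Plus, and, if evaluate_gradient is true, its
+// derivative phi'(x) = direction' * grad f(position (+) x * direction).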
+void LineSearchFunction::Evaluate(const double x,
+                                  const bool evaluate_gradient,
+                                  FunctionSample* output) {
+  output->x = x;
+  output->vector_x_is_valid = false;
+  output->value_is_valid = false;
+  output->gradient_is_valid = false;
+  output->vector_gradient_is_valid = false;
+
+  scaled_direction_ = output->x * direction_;
+  output->vector_x.resize(position_.rows(), 1);
+  if (!evaluator_->Plus(position_.data(),
+                        scaled_direction_.data(),
+                        output->vector_x.data())) {
+    return;
+  }
+  output->vector_x_is_valid = true;
+
+  double* gradient = NULL;
+  if (evaluate_gradient) {
+    output->vector_gradient.resize(direction_.rows(), 1);
+    gradient = output->vector_gradient.data();
+  }
+  const bool eval_status = evaluator_->Evaluate(
+      output->vector_x.data(), &(output->value), NULL, gradient, NULL);
+
+  if (!eval_status || !std::isfinite(output->value)) {
+    return;
+  }
+
+  output->value_is_valid = true;
+  if (!evaluate_gradient) {
+    return;
+  }
+
+  output->gradient = direction_.dot(output->vector_gradient);
+  if (!std::isfinite(output->gradient)) {
+    return;
+  }
+
+  output->gradient_is_valid = true;
+  output->vector_gradient_is_valid = true;
+}
+
+double LineSearchFunction::DirectionInfinityNorm() const {
+  return direction_.lpNorm<Eigen::Infinity>();
+}
+
+void LineSearchFunction::ResetTimeStatistics() {
+  const map<string, CallStatistics> evaluator_statistics =
+      evaluator_->Statistics();
+
+  initial_evaluator_residual_time_in_seconds =
+      FindWithDefault(
+          evaluator_statistics, "Evaluator::Residual", CallStatistics())
+          .time;
+  initial_evaluator_jacobian_time_in_seconds =
+      FindWithDefault(
+          evaluator_statistics, "Evaluator::Jacobian", CallStatistics())
+          .time;
+}
+
+void LineSearchFunction::TimeStatistics(
+    double* cost_evaluation_time_in_seconds,
+    double* gradient_evaluation_time_in_seconds) const {
+  const map<string, CallStatistics> evaluator_time_statistics =
+      evaluator_->Statistics();
+  *cost_evaluation_time_in_seconds =
+      FindWithDefault(
+          evaluator_time_statistics, "Evaluator::Residual", CallStatistics())
+          .time -
+      initial_evaluator_residual_time_in_seconds;
+  // Strictly speaking this will slightly underestimate the time spent
+  // evaluating the gradient of the line search univariate cost function as it
+  // does not count the time spent performing the dot product with the direction
+  // vector.  However, this will typically be small by comparison, and also
+  // allows direct subtraction of the timing information from the totals for
+  // the evaluator returned in the solver summary.
+  *gradient_evaluation_time_in_seconds =
+      FindWithDefault(
+          evaluator_time_statistics, "Evaluator::Jacobian", CallStatistics())
+          .time -
+      initial_evaluator_jacobian_time_in_seconds;
+}
+
+void LineSearch::Search(double step_size_estimate,
+                        double initial_cost,
+                        double initial_gradient,
+                        Summary* summary) const {
+  const double start_time = WallTimeInSeconds();
+  CHECK(summary != nullptr);
+  *summary = LineSearch::Summary();
+
+  summary->cost_evaluation_time_in_seconds = 0.0;
+  summary->gradient_evaluation_time_in_seconds = 0.0;
+  summary->polynomial_minimization_time_in_seconds = 0.0;
+  options().function->ResetTimeStatistics();
+  this->DoSearch(step_size_estimate, initial_cost, initial_gradient, summary);
+  options().function->
+      TimeStatistics(&summary->cost_evaluation_time_in_seconds,
+                     &summary->gradient_evaluation_time_in_seconds);
+
+  summary->total_time_in_seconds = WallTimeInSeconds() - start_time;
+}
+
+// Returns a step_size in [min_step_size, max_step_size] which minimizes the
+// polynomial of the degree defined by interpolation_type, interpolating all
+// of the provided samples with valid values.
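+//
+// For reference, in the two point QUADRATIC case, e.g. built from phi(0),
+// phi'(0) and phi(a) with a = current.x, the unconstrained minimizer of the
+// fitted parabola is
+//
+//   a* = -phi'(0) * a^2 / (2 * (phi(a) - phi(0) - phi'(0) * a)),
+//
+// and MinimizeInterpolatingPolynomial returns the minimizer of the fitted
+// polynomial restricted to [min_step_size, max_step_size].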
+double LineSearch::InterpolatingPolynomialMinimizingStepSize(
+    const LineSearchInterpolationType& interpolation_type,
+    const FunctionSample& lowerbound,
+    const FunctionSample& previous,
+    const FunctionSample& current,
+    const double min_step_size,
+    const double max_step_size) const {
+  if (!current.value_is_valid ||
+      (interpolation_type == BISECTION &&
+       max_step_size <= current.x)) {
+    // Either: sample is invalid; or we are using BISECTION and contracting
+    // the step size.
+    return std::min(std::max(current.x * 0.5, min_step_size), max_step_size);
+  } else if (interpolation_type == BISECTION) {
+    CHECK_GT(max_step_size, current.x);
+    // We are expanding the search (during a Wolfe bracketing phase) using
+    // BISECTION interpolation.  Using BISECTION when trying to expand is
+    // strictly speaking an oxymoron, but we define this to mean always taking
+    // the maximum step size so that the Armijo & Wolfe implementations are
+    // agnostic to the interpolation type.
+    return max_step_size;
+  }
+  // Only check if lower-bound is valid here, where it is required
+  // to avoid replicating current.value_is_valid == false
+  // behaviour in WolfeLineSearch.
+  CHECK(lowerbound.value_is_valid)
+      << std::scientific << std::setprecision(kErrorMessageNumericPrecision)
+      << "Ceres bug: lower-bound sample for interpolation is invalid, "
+      << "please contact the developers!, interpolation_type: "
+      << LineSearchInterpolationTypeToString(interpolation_type)
+      << ", lowerbound: " << lowerbound << ", previous: " << previous
+      << ", current: " << current;
+
+  // Select step size by interpolating the function and gradient values
+  // and minimizing the corresponding polynomial.
+  vector<FunctionSample> samples;
+  samples.push_back(lowerbound);
+
+  if (interpolation_type == QUADRATIC) {
+    // Two point interpolation using function values and the
+    // gradient at the lower bound.
+    samples.push_back(FunctionSample(current.x, current.value));
+
+    if (previous.value_is_valid) {
+      // Three point interpolation, using function values and the
+      // gradient at the lower bound.
+      samples.push_back(FunctionSample(previous.x, previous.value));
+    }
+  } else if (interpolation_type == CUBIC) {
+    // Two point interpolation using the function values and the gradients.
+    samples.push_back(current);
+
+    if (previous.value_is_valid) {
+      // Three point interpolation using the function values and
+      // the gradients.
+      samples.push_back(previous);
+    }
+  } else {
+    LOG(FATAL) << "Ceres bug: No handler for interpolation_type: "
+               << LineSearchInterpolationTypeToString(interpolation_type)
+               << ", please contact the developers!";
+  }
+
+  double step_size = 0.0, unused_min_value = 0.0;
+  MinimizeInterpolatingPolynomial(samples, min_step_size, max_step_size,
+                                  &step_size, &unused_min_value);
+  return step_size;
+}
+
+ArmijoLineSearch::ArmijoLineSearch(const LineSearch::Options& options)
+    : LineSearch(options) {}
+
+void ArmijoLineSearch::DoSearch(const double step_size_estimate,
+                                const double initial_cost,
+                                const double initial_gradient,
+                                Summary* summary) const {
+  CHECK_GE(step_size_estimate, 0.0);
+  CHECK_GT(options().sufficient_decrease, 0.0);
+  CHECK_LT(options().sufficient_decrease, 1.0);
+  CHECK_GT(options().max_num_iterations, 0);
+  LineSearchFunction* function = options().function;
+
+  // Note initial_cost & initial_gradient are evaluated at step_size = 0,
+  // not step_size_estimate, which is our starting guess.
+  FunctionSample initial_position(0.0, initial_cost, initial_gradient);
+  initial_position.vector_x = function->position();
+  initial_position.vector_x_is_valid = true;
+
+  const double descent_direction_max_norm = function->DirectionInfinityNorm();
+  FunctionSample previous;
+  FunctionSample current;
+
+  // As the Armijo line search algorithm always uses the initial point, for
+  // which both the function value and derivative are known, when fitting a
+  // minimizing polynomial, we can fit up to a quadratic without requiring the
+  // gradient at the current query point.
+  const bool kEvaluateGradient = options().interpolation_type == CUBIC;
+
+  ++summary->num_function_evaluations;
+  if (kEvaluateGradient) {
+    ++summary->num_gradient_evaluations;
+  }
+
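+  // Iterate, shrinking the step via polynomial interpolation, until the
+  // Armijo sufficient decrease condition
+  //
+  //   phi(x) <= phi(0) + sufficient_decrease * phi'(0) * x
+  //
+  // holds, where phi(x) = f(position + x * direction).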
+  function->Evaluate(step_size_estimate, kEvaluateGradient, &current);
+  while (!current.value_is_valid ||
+         current.value > (initial_cost
+                          + options().sufficient_decrease
+                          * initial_gradient
+                          * current.x)) {
+    // If current.value_is_valid is false, we treat it as if the cost at that
+    // point is not small enough to satisfy the sufficient decrease condition.
+    ++summary->num_iterations;
+    if (summary->num_iterations >= options().max_num_iterations) {
+      summary->error =
+          StringPrintf("Line search failed: Armijo failed to find a point "
+                       "satisfying the sufficient decrease condition within "
+                       "specified max_num_iterations: %d.",
+                       options().max_num_iterations);
+      LOG_IF(WARNING, !options().is_silent) << summary->error;
+      return;
+    }
+
+    const double polynomial_minimization_start_time = WallTimeInSeconds();
+    const double step_size =
+        this->InterpolatingPolynomialMinimizingStepSize(
+            options().interpolation_type,
+            initial_position,
+            previous,
+            current,
+            (options().max_step_contraction * current.x),
+            (options().min_step_contraction * current.x));
+    summary->polynomial_minimization_time_in_seconds +=
+        (WallTimeInSeconds() - polynomial_minimization_start_time);
+
+    if (step_size * descent_direction_max_norm < options().min_step_size) {
+      summary->error =
+          StringPrintf("Line search failed: step_size too small: %.5e "
+                       "with descent_direction_max_norm: %.5e.", step_size,
+                       descent_direction_max_norm);
+      LOG_IF(WARNING, !options().is_silent) << summary->error;
+      return;
+    }
+
+    previous = current;
+
+    ++summary->num_function_evaluations;
+    if (kEvaluateGradient) {
+      ++summary->num_gradient_evaluations;
+    }
+
+    function->Evaluate(step_size, kEvaluateGradient, &current);
+  }
+
+  summary->optimal_point = current;
+  summary->success = true;
+}
+
+WolfeLineSearch::WolfeLineSearch(const LineSearch::Options& options)
+    : LineSearch(options) {}
+
+void WolfeLineSearch::DoSearch(const double step_size_estimate,
+                               const double initial_cost,
+                               const double initial_gradient,
+                               Summary* summary) const {
+  // All parameters should have been validated by the Solver, but as
+  // invalid values would produce crazy nonsense, hard check them here.
+  CHECK_GE(step_size_estimate, 0.0);
+  CHECK_GT(options().sufficient_decrease, 0.0);
+  CHECK_GT(options().sufficient_curvature_decrease,
+           options().sufficient_decrease);
+  CHECK_LT(options().sufficient_curvature_decrease, 1.0);
+  CHECK_GT(options().max_step_expansion, 1.0);
+
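+  // In terms of the line search function phi(x) = f(position + x * direction),
+  // the strong Wolfe conditions checked below are:
+  //
+  //   1. phi(x) <= phi(0) + sufficient_decrease * phi'(0) * x        (Armijo)
+  //   2. |phi'(x)| <= sufficient_curvature_decrease * |phi'(0)|      (curvature)
+  //
+  // with 0 < sufficient_decrease < sufficient_curvature_decrease < 1.
+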
+  // Note initial_cost & initial_gradient are evaluated at step_size = 0,
+  // not step_size_estimate, which is our starting guess.
+  FunctionSample initial_position(0.0, initial_cost, initial_gradient);
+  initial_position.vector_x = options().function->position();
+  initial_position.vector_x_is_valid = true;
+  bool do_zoom_search = false;
+  // Important: The high/low in bracket_high & bracket_low refer to their
+  // _function_ values, not their step sizes i.e. it is _not_ required that
+  // bracket_low.x < bracket_high.x.
+  FunctionSample solution, bracket_low, bracket_high;
+
+  // Wolfe bracketing phase: Increases step_size until either it finds a point
+  // that satisfies the (strong) Wolfe conditions, or an interval that brackets
+  // step sizes which satisfy the conditions.  From Nocedal & Wright [1] p61 the
+  // interval: (step_size_{k-1}, step_size_{k}) contains step lengths satisfying
+  // the strong Wolfe conditions if one of the following conditions is met:
+  //
+  //   1. step_size_{k} violates the sufficient decrease (Armijo) condition.
+  //   2. f(step_size_{k}) >= f(step_size_{k-1}).
+  //   3. f'(step_size_{k}) >= 0.
+  //
+  // Caveat: If f(step_size_{k}) is invalid, then step_size is reduced; ignoring
+  // this special case, step_size monotonically increases during bracketing.
+  if (!this->BracketingPhase(initial_position,
+                             step_size_estimate,
+                             &bracket_low,
+                             &bracket_high,
+                             &do_zoom_search,
+                             summary)) {
+    // Failed to find either a valid point, a valid bracket satisfying the Wolfe
+    // conditions, or even a step size > minimum tolerance satisfying the Armijo
+    // condition.
+    return;
+  }
+
+  if (!do_zoom_search) {
+    // Either: Bracketing phase already found a point satisfying the strong
+    // Wolfe conditions, thus no Zoom required.
+    //
+    // Or: Bracketing failed to find a valid bracket or a point satisfying the
+    // strong Wolfe conditions within max_num_iterations, or whilst searching
+    // shrank the bracket width until it was below our minimum tolerance.
+    // As these are 'artificial' constraints, and we would otherwise fail to
+    // produce a valid point when ArmijoLineSearch would succeed, we return the
+    // point with the lowest cost found thus far which satisfies the Armijo
+    // condition (but not the Wolfe conditions).
+    summary->optimal_point = bracket_low;
+    summary->success = true;
+    return;
+  }
+
+  VLOG(3) << std::scientific << std::setprecision(kErrorMessageNumericPrecision)
+          << "Starting line search zoom phase with bracket_low: "
+          << bracket_low << ", bracket_high: " << bracket_high
+          << ", bracket width: " << fabs(bracket_low.x - bracket_high.x)
+          << ", bracket abs delta cost: "
+          << fabs(bracket_low.value - bracket_high.value);
+
+  // Wolfe Zoom phase: Called when the Bracketing phase finds an interval of
+  // non-zero, finite width that should bracket step sizes which satisfy the
+  // (strong) Wolfe conditions (before finding a step size that satisfies the
+  // conditions).  Zoom successively decreases the size of the interval until a
+  // step size which satisfies the Wolfe conditions is found.  The interval is
+  // defined by bracket_low & bracket_high, which satisfy:
+  //
+  //   1. The interval bounded by step sizes: bracket_low.x & bracket_high.x
+  //      contains step sizes that satisfy the strong Wolfe conditions.
+  //   2. bracket_low.x is, of all the step sizes evaluated *which satisfied the
+  //      Armijo sufficient decrease condition*, the one which generated the
+  //      smallest function value, i.e. bracket_low.value <
+  //      f(all other steps satisfying Armijo).
+  //        - Note that this does _not_ (necessarily) mean that initially
+  //          bracket_low.value < bracket_high.value (although this is typical)
+  //          e.g. when bracket_low = initial_position, and bracket_high is the
+  //          first sample, and which does not satisfy the Armijo condition,
+  //          but still has bracket_high.value < initial_position.value.
+  //   3. bracket_high is chosen after bracket_low, s.t.
+  //      bracket_low.gradient * (bracket_high.x - bracket_low.x) < 0.
+  if (!this->ZoomPhase(initial_position,
+                       bracket_low,
+                       bracket_high,
+                       &solution,
+                       summary) && !solution.value_is_valid) {
+    // Failed to find a valid point (given the specified decrease parameters)
+    // within the specified bracket.
+    return;
+  }
+  // Ensure that if we ran out of iterations whilst zooming the bracket, or
+  // shrank the bracket width to < tolerance and failed to find a point which
+  // satisfies the strong Wolfe curvature condition, we return the point
+  // amongst those found thus far which minimizes f() and satisfies the Armijo
+  // condition.
+
+  if (!solution.value_is_valid || solution.value > bracket_low.value) {
+    summary->optimal_point = bracket_low;
+  } else {
+    summary->optimal_point = solution;
+  }
+
+  summary->success = true;
+}
+
+// Returns true if either:
+//
+// A termination condition satisfying the (strong) Wolfe bracketing conditions
+// is found:
+//
+// - A valid point, defined as a bracket of zero width [zoom not required].
+// - A valid bracket (of width > tolerance), [zoom required].
+//
+// Or, searching was stopped due to an 'artificial' constraint, i.e. not
+// a condition imposed / required by the underlying algorithm, but instead an
+// engineering / implementation consideration. But a step which exceeds the
+// minimum step size and satisfies the Armijo condition was still found,
+// and should thus be used [zoom not required].
+//
+// Returns false if no step size > minimum step size was found which
+// satisfies at least the Armijo condition.
+bool WolfeLineSearch::BracketingPhase(
+    const FunctionSample& initial_position,
+    const double step_size_estimate,
+    FunctionSample* bracket_low,
+    FunctionSample* bracket_high,
+    bool* do_zoom_search,
+    Summary* summary) const {
+  LineSearchFunction* function = options().function;
+
+  FunctionSample previous = initial_position;
+  FunctionSample current;
+
+  const double descent_direction_max_norm =
+      function->DirectionInfinityNorm();
+
+  *do_zoom_search = false;
+  *bracket_low = initial_position;
+
+  // As we require the gradient to evaluate the Wolfe condition, we always
+  // calculate it together with the value, irrespective of the interpolation
+  // type.  As opposed to only calculating the gradient after the Armijo
+  // condition is satisfied, as the computational saving from this approach
+  // would be slight (perhaps even negative due to the extra call).  Also,
+  // always calculating the value & gradient together protects against us
+  // reporting invalid solutions if the cost function returns slightly different
+  // function values when evaluated with / without gradients (due to numerical
+  // issues).
+  ++summary->num_function_evaluations;
+  ++summary->num_gradient_evaluations;
+  const bool kEvaluateGradient = true;
+  function->Evaluate(step_size_estimate, kEvaluateGradient, &current);
+  while (true) {
+    ++summary->num_iterations;
+
+    if (current.value_is_valid &&
+        (current.value > (initial_position.value
+                          + options().sufficient_decrease
+                          * initial_position.gradient
+                          * current.x) ||
+         (previous.value_is_valid && current.value > previous.value))) {
+      // Bracket found: current step size violates Armijo sufficient decrease
+      // condition, or has stepped past an inflection point of f() relative to
+      // previous step size.
+      *do_zoom_search = true;
+      *bracket_low = previous;
+      *bracket_high = current;
+      VLOG(3) << std::scientific
+              << std::setprecision(kErrorMessageNumericPrecision)
+              << "Bracket found: current step (" << current.x
+              << ") violates Armijo sufficient condition, or has passed an "
+              << "inflection point of f() based on value.";
+      break;
+    }
+
+    if (current.value_is_valid &&
+        fabs(current.gradient) <=
+        -options().sufficient_curvature_decrease * initial_position.gradient) {
+      // Current step size satisfies the strong Wolfe conditions, and is thus a
+      // valid termination point, therefore a Zoom not required.
+      *bracket_low = current;
+      *bracket_high = current;
+      VLOG(3) << std::scientific
+              << std::setprecision(kErrorMessageNumericPrecision)
+              << "Bracketing phase found step size: " << current.x
+              << ", satisfying strong Wolfe conditions, initial_position: "
+              << initial_position << ", current: " << current;
+      break;
+
+    } else if (current.value_is_valid && current.gradient >= 0) {
+      // Bracket found: current step size has stepped past an inflection point
+      // of f(), but Armijo sufficient decrease is still satisfied and
+      // f(current) is our best minimum thus far.  Remember step size
+      // monotonically increases, thus previous_step_size < current_step_size
+      // even though f(previous) > f(current).
+      *do_zoom_search = true;
+      // Note inverse ordering from first bracket case.
+      *bracket_low = current;
+      *bracket_high = previous;
+      VLOG(3) << "Bracket found: current step (" << current.x
+              << ") satisfies Armijo, but has gradient >= 0, thus have passed "
+              << "an inflection point of f().";
+      break;
+
+    } else if (current.value_is_valid &&
+               fabs(current.x - previous.x) * descent_direction_max_norm
+               < options().min_step_size) {
+      // We have shrunk the search bracket to a width less than our tolerance,
+      // and still not found either a point satisfying the strong Wolfe
+      // conditions, or a valid bracket containing such a point. Stop searching
+      // and set bracket_low to the step size amongst all those tested which
+      // minimizes f() and satisfies the Armijo condition.
+      LOG_IF(WARNING, !options().is_silent)
+          << "Line search failed: Wolfe bracketing phase shrank "
+          << "bracket width: " << fabs(current.x - previous.x)
+          <<  ", to < tolerance: " << options().min_step_size
+          << ", with descent_direction_max_norm: "
+          << descent_direction_max_norm << ", and failed to find "
+          << "a point satisfying the strong Wolfe conditions or a "
+          << "bracketing containing such a point. Accepting "
+          << "point found satisfying Armijo condition only, to "
+          << "allow continuation.";
+      *bracket_low = current;
+      break;
+
+    } else if (summary->num_iterations >= options().max_num_iterations) {
+      // Check num iterations bound here so that we always evaluate the
+      // max_num_iterations-th iteration against all conditions, and
+      // then perform no additional (unused) evaluations.
+      summary->error =
+          StringPrintf("Line search failed: Wolfe bracketing phase failed to "
+                       "find a point satisfying strong Wolfe conditions, or a "
+                       "bracket containing such a point within specified "
+                       "max_num_iterations: %d", options().max_num_iterations);
+      LOG_IF(WARNING, !options().is_silent) << summary->error;
+      // Ensure that bracket_low is always set to the step size amongst all
+      // those tested which minimizes f() and satisfies the Armijo condition
+      // when we terminate due to the 'artificial' max_num_iterations condition.
+      *bracket_low =
+          current.value_is_valid && current.value < bracket_low->value
+          ? current : *bracket_low;
+      break;
+    }
+    // Either: f(current) is invalid; or, f(current) is valid, but does not
+    // satisfy the strong Wolfe conditions itself, or the conditions for
+    // being a boundary of a bracket.
+
+    // If f(current) is valid, (but meets no criteria) expand the search by
+    // increasing the step size.  If f(current) is invalid, contract the step
+    // size.
+    //
+    // In Nocedal & Wright [1] (p60), the step-size can only increase in the
+    // bracketing phase: step_size_{k+1} \in [step_size_k, step_size_k * factor].
+    // However this does not account for the function returning invalid values
+    // which we support, in which case we need to contract the step size whilst
+    // ensuring that we do not invert the bracket, i.e., we require that:
+    // step_size_{k-1} <= step_size_{k+1} < step_size_k.
+    const double min_step_size =
+        current.value_is_valid
+        ? current.x : previous.x;
+    const double max_step_size =
+        current.value_is_valid
+        ? (current.x * options().max_step_expansion) : current.x;
+
+    // We are performing 2-point interpolation only here, but the API of
+    // InterpolatingPolynomialMinimizingStepSize() allows for up to
+    // 3-point interpolation, so pad call with a sample with an invalid
+    // value that will therefore be ignored.
+    const FunctionSample unused_previous;
+    DCHECK(!unused_previous.value_is_valid);
+    // Contracts step size if f(current) is not valid.
+    const double polynomial_minimization_start_time = WallTimeInSeconds();
+    const double step_size =
+        this->InterpolatingPolynomialMinimizingStepSize(
+            options().interpolation_type,
+            previous,
+            unused_previous,
+            current,
+            min_step_size,
+            max_step_size);
+    summary->polynomial_minimization_time_in_seconds +=
+        (WallTimeInSeconds() - polynomial_minimization_start_time);
+    if (step_size * descent_direction_max_norm < options().min_step_size) {
+      summary->error =
+          StringPrintf("Line search failed: step_size too small: %.5e "
+                       "with descent_direction_max_norm: %.5e", step_size,
+                       descent_direction_max_norm);
+      LOG_IF(WARNING, !options().is_silent) << summary->error;
+      return false;
+    }
+
+    // Only advance the lower boundary (in x) of the bracket if f(current)
+    // is valid such that we can support contracting the step size when
+    // f(current) is invalid without risking inverting the bracket in x, i.e.
+    // prevent previous.x > current.x.
+    previous = current.value_is_valid ? current : previous;
+    ++summary->num_function_evaluations;
+    ++summary->num_gradient_evaluations;
+    function->Evaluate(step_size, kEvaluateGradient, &current);
+  }
+
+  // Ensure that even if a valid bracket was found, we will only mark a zoom
+  // as required if the bracket's width is greater than our minimum tolerance.
+  if (*do_zoom_search &&
+      fabs(bracket_high->x - bracket_low->x) * descent_direction_max_norm
+      < options().min_step_size) {
+    *do_zoom_search = false;
+  }
+
+  return true;
+}
+
+// Returns true iff solution satisfies the strong Wolfe conditions. Otherwise,
+// on return false, if we stopped searching due to the 'artificial' condition of
+// reaching max_num_iterations, solution is the step size amongst all those
+// tested, which satisfied the Armijo decrease condition and minimized f().
+bool WolfeLineSearch::ZoomPhase(const FunctionSample& initial_position,
+                                FunctionSample bracket_low,
+                                FunctionSample bracket_high,
+                                FunctionSample* solution,
+                                Summary* summary) const {
+  LineSearchFunction* function = options().function;
+
+  CHECK(bracket_low.value_is_valid && bracket_low.gradient_is_valid)
+      << std::scientific << std::setprecision(kErrorMessageNumericPrecision)
+      << "Ceres bug: f_low input to Wolfe Zoom invalid, please contact "
+      << "the developers!, initial_position: " << initial_position
+      << ", bracket_low: " << bracket_low
+      << ", bracket_high: "<< bracket_high;
+  // We do not require bracket_high.gradient_is_valid as the gradient condition
+  // for a valid bracket is only dependent upon bracket_low.gradient, and
+  // in order to minimize jacobian evaluations, bracket_high.gradient may
+  // not have been calculated (if bracket_high.value does not satisfy the
+  // Armijo sufficient decrease condition and interpolation method does not
+  // require it).
+  //
+  // We also do not require that: bracket_low.value < bracket_high.value,
+  // although this is typical. This is to deal with the case when
+  // bracket_low = initial_position, bracket_high is the first sample,
+  // and bracket_high does not satisfy the Armijo condition, but still has
+  // bracket_high.value < initial_position.value.
+  CHECK(bracket_high.value_is_valid)
+      << std::scientific << std::setprecision(kErrorMessageNumericPrecision)
+      << "Ceres bug: f_high input to Wolfe Zoom invalid, please "
+      << "contact the developers!, initial_position: " << initial_position
+      << ", bracket_low: " << bracket_low
+      << ", bracket_high: "<< bracket_high;
+
+  if (bracket_low.gradient * (bracket_high.x - bracket_low.x) >= 0) {
+    // The third condition for a valid initial bracket:
+    //
+    //   3. bracket_high is chosen after bracket_low, s.t.
+    //      bracket_low.gradient * (bracket_high.x - bracket_low.x) < 0.
+    //
+    // is not satisfied.  As this can happen when the user's cost function
+    // returns inconsistent gradient values relative to the function values,
+    // we do not CHECK_LT(), but we do stop processing and return an invalid
+    // value.
+    summary->error =
+        StringPrintf("Line search failed: Wolfe zoom phase passed a bracket "
+                     "which does not satisfy: bracket_low.gradient * "
+                     "(bracket_high.x - bracket_low.x) < 0 [%.8e !< 0] "
+                     "with initial_position: %s, bracket_low: %s, bracket_high:"
+                     " %s, the most likely cause of which is the cost function "
+                     "returning inconsistent gradient & function values.",
+                     bracket_low.gradient * (bracket_high.x - bracket_low.x),
+                     initial_position.ToDebugString().c_str(),
+                     bracket_low.ToDebugString().c_str(),
+                     bracket_high.ToDebugString().c_str());
+    LOG_IF(WARNING, !options().is_silent) << summary->error;
+    solution->value_is_valid = false;
+    return false;
+  }
+
+  const int num_bracketing_iterations = summary->num_iterations;
+  const double descent_direction_max_norm = function->DirectionInfinityNorm();
+
+  while (true) {
+    // Set solution to bracket_low, as it is our best step size (smallest f())
+    // found thus far and satisfies the Armijo condition, even though it does
+    // not satisfy the Wolfe condition.
+    *solution = bracket_low;
+    if (summary->num_iterations >= options().max_num_iterations) {
+      summary->error =
+          StringPrintf("Line search failed: Wolfe zoom phase failed to "
+                       "find a point satisfying strong Wolfe conditions "
+                       "within specified max_num_iterations: %d, "
+                       "(num iterations taken for bracketing: %d).",
+                       options().max_num_iterations, num_bracketing_iterations);
+      LOG_IF(WARNING, !options().is_silent) << summary->error;
+      return false;
+    }
+    if (fabs(bracket_high.x - bracket_low.x) * descent_direction_max_norm
+        < options().min_step_size) {
+      // Bracket width has been reduced below tolerance, and no point satisfying
+      // the strong Wolfe conditions has been found.
+      summary->error =
+          StringPrintf("Line search failed: Wolfe zoom bracket width: %.5e "
+                       "too small with descent_direction_max_norm: %.5e.",
+                       fabs(bracket_high.x - bracket_low.x),
+                       descent_direction_max_norm);
+      LOG_IF(WARNING, !options().is_silent) << summary->error;
+      return false;
+    }
+
+    ++summary->num_iterations;
+    // Polynomial interpolation requires inputs ordered according to step size,
+    // not f(step size).
+    const FunctionSample& lower_bound_step =
+        bracket_low.x < bracket_high.x ? bracket_low : bracket_high;
+    const FunctionSample& upper_bound_step =
+        bracket_low.x < bracket_high.x ? bracket_high : bracket_low;
+    // We are performing 2-point interpolation only here, but the API of
+    // InterpolatingPolynomialMinimizingStepSize() allows for up to
+    // 3-point interpolation, so we pad the call with a sample with an invalid
+    // value that will therefore be ignored.
+    const FunctionSample unused_previous;
+    DCHECK(!unused_previous.value_is_valid);
+    const double polynomial_minimization_start_time = WallTimeInSeconds();
+    const double step_size =
+        this->InterpolatingPolynomialMinimizingStepSize(
+            options().interpolation_type,
+            lower_bound_step,
+            unused_previous,
+            upper_bound_step,
+            lower_bound_step.x,
+            upper_bound_step.x);
+    summary->polynomial_minimization_time_in_seconds +=
+        (WallTimeInSeconds() - polynomial_minimization_start_time);
+    // No check on magnitude of step size being too small here as it is
+    // lower-bounded by the initial bracket start point, which was valid.
+    //
+    // As we require the gradient to evaluate the Wolfe condition, we always
+    // calculate it together with the value, irrespective of the interpolation
+    // type, as opposed to only calculating the gradient after the Armijo
+    // condition is satisfied.  The computational saving from the latter
+    // approach would be slight (perhaps even negative due to the extra call).
+    // Also, always calculating the value & gradient together protects against
+    // us reporting invalid solutions if the cost function returns slightly
+    // different function values when evaluated with / without gradients (due
+    // to numerical issues).
+    ++summary->num_function_evaluations;
+    ++summary->num_gradient_evaluations;
+    const bool kEvaluateGradient = true;
+    function->Evaluate(step_size, kEvaluateGradient, solution);
+    if (!solution->value_is_valid || !solution->gradient_is_valid) {
+      summary->error =
+          StringPrintf("Line search failed: Wolfe Zoom phase found "
+                       "step_size: %.5e, for which function is invalid, "
+                       "between low_step: %.5e and high_step: %.5e "
+                       "at which function is valid.",
+                       solution->x, bracket_low.x, bracket_high.x);
+      LOG_IF(WARNING, !options().is_silent) << summary->error;
+      return false;
+    }
+
+    VLOG(3) << "Zoom iteration: "
+            << summary->num_iterations - num_bracketing_iterations
+            << ", bracket_low: " << bracket_low
+            << ", bracket_high: " << bracket_high
+            << ", minimizing solution: " << *solution;
+
+    if ((solution->value > (initial_position.value
+                            + options().sufficient_decrease
+                            * initial_position.gradient
+                            * solution->x)) ||
+        (solution->value >= bracket_low.value)) {
+      // Armijo sufficient decrease not satisfied, or not better
+      // than current lowest sample, use as new upper bound.
+      bracket_high = *solution;
+      continue;
+    }
+
+    // Armijo sufficient decrease satisfied, check strong Wolfe condition.
+    if (fabs(solution->gradient) <=
+        -options().sufficient_curvature_decrease * initial_position.gradient) {
+      // Found a valid termination point satisfying strong Wolfe conditions.
+      VLOG(3) << std::scientific
+              << std::setprecision(kErrorMessageNumericPrecision)
+              << "Zoom phase found step size: " << solution->x
+              << ", satisfying strong Wolfe conditions.";
+      break;
+
+    } else if (solution->gradient * (bracket_high.x - bracket_low.x) >= 0) {
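+      // This mirrors the zoom procedure of Nocedal & Wright: when the
+      // derivative at the new point has the same sign as
+      // (bracket_high.x - bracket_low.x), the current bracket_low becomes the
+      // new bracket_high, and the new point then replaces bracket_low below,
+      // so that a point satisfying the strong Wolfe conditions remains
+      // bracketed.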
+      bracket_high = bracket_low;
+    }
+
+    bracket_low = *solution;
+  }
+  // Solution contains a valid point which satisfies the strong Wolfe
+  // conditions.
+  return true;
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/line_search.h b/internal/ceres/line_search.h
new file mode 100644
index 0000000..a162979
--- /dev/null
+++ b/internal/ceres/line_search.h
@@ -0,0 +1,307 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Interface for and implementation of various Line search algorithms.
+
+#ifndef CERES_INTERNAL_LINE_SEARCH_H_
+#define CERES_INTERNAL_LINE_SEARCH_H_
+
+#include <string>
+#include <vector>
+#include "ceres/function_sample.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/port.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+class Evaluator;
+class LineSearchFunction;
+
+// Line search is another name for a one dimensional optimization
+// algorithm. The name "line search" comes from the fact that the one
+// dimensional optimization problems which arise as subproblems of
+// general multidimensional optimization problems are restricted to a
+// line (the search direction) in the parameter space.
+//
+// While finding the exact minimum of a one dimensional function is
+// hard, instances of LineSearch find a point that satisfies a
+// sufficient decrease condition. Depending on the particular
+// condition used, we get a variety of different line search
+// algorithms, e.g., Armijo, Wolfe etc.
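+//
+// A minimal usage sketch (illustrative only; "evaluator", "position",
+// "direction" and the initial cost / directional derivative / step size are
+// assumed to be provided by the caller, as in LineSearchMinimizer):
+//
+//   LineSearchFunction line_search_function(evaluator);
+//   line_search_function.Init(position, direction);
+//   LineSearch::Options options;
+//   options.function = &line_search_function;
+//   std::string error;
+//   std::unique_ptr<LineSearch> line_search(
+//       LineSearch::Create(ceres::WOLFE, options, &error));
+//   LineSearch::Summary summary;
+//   line_search->Search(initial_step_size, initial_cost,
+//                       directional_derivative, &summary);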
+class LineSearch {
+ public:
+  struct Summary;
+
+  struct Options {
+    // Degree of the polynomial used to approximate the objective
+    // function.
+    LineSearchInterpolationType interpolation_type = CUBIC;
+
+    // Armijo and Wolfe line search parameters.
+
+    // Solving the line search problem exactly is computationally
+    // prohibitive. Fortunately, line search based optimization
+    // algorithms can still guarantee convergence if instead of an
+    // exact solution, the line search algorithm returns a solution
+    // which decreases the value of the objective function
+    // sufficiently. More precisely, we are looking for a step_size
+    // s.t.
+    //
+    //  f(step_size) <= f(0) + sufficient_decrease * f'(0) * step_size
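+    //
+    // For example (illustrative numbers), with f(0) = 10.0, f'(0) = -2.0 and
+    // sufficient_decrease = 1e-4, a step_size of 0.5 is acceptable iff
+    // f(0.5) <= 10.0 + 1e-4 * (-2.0) * 0.5 = 9.9999.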
+    double sufficient_decrease = 1e-4;
+
+    // In each iteration of the Armijo / Wolfe line search,
+    //
+    // new_step_size >= max_step_contraction * step_size
+    //
+    // Note that by definition, for contraction:
+    //
+    //  0 < max_step_contraction < min_step_contraction < 1
+    //
+    double max_step_contraction = 1e-3;
+
+    // In each iteration of the Armijo / Wolfe line search,
+    //
+    // new_step_size <= min_step_contraction * step_size
+    // Note that by definition, for contraction:
+    //
+    //  0 < max_step_contraction < min_step_contraction < 1
+    //
+    double min_step_contraction = 0.9;
+
+    // If during the line search, the step_size falls below this
+    // value, it is truncated to zero.
+    double min_step_size = 1e-9;
+
+    // Maximum number of trial step size iterations during each line search;
+    // if a step size satisfying the search conditions cannot be found within
+    // this number of trials, the line search will terminate.
+    int max_num_iterations = 20;
+
+    // Wolfe-specific line search parameters.
+
+    // The strong Wolfe conditions consist of the Armijo sufficient
+    // decrease condition, and an additional requirement that the
+    // step-size be chosen s.t. the _magnitude_ ('strong' Wolfe
+    // conditions) of the gradient along the search direction
+    // decreases sufficiently. Precisely, this second condition
+    // is that we seek a step_size s.t.
+    //
+    //   |f'(step_size)| <= sufficient_curvature_decrease * |f'(0)|
+    //
+    // Where f() is the line search objective and f'() is the derivative
+    // of f w.r.t step_size (d f / d step_size).
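+    //
+    // For example (illustrative numbers), with f'(0) = -4.0 and
+    // sufficient_curvature_decrease = 0.9, any step_size with
+    // |f'(step_size)| <= 0.9 * 4.0 = 3.6 satisfies this condition.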
+    double sufficient_curvature_decrease = 0.9;
+
+    // During the bracketing phase of the Wolfe search, the step size is
+    // increased until either a point satisfying the Wolfe conditions is
+    // found, or an upper bound for a bracket containing a point satisfying
+    // the conditions is found.  Precisely, at each iteration of the
+    // expansion:
+    //
+    //   new_step_size <= max_step_expansion * step_size.
+    //
+    // By definition for expansion, max_step_expansion > 1.0.
+    double max_step_expansion = 10;
+
+    bool is_silent = false;
+
+    // The one dimensional function that the line search algorithm
+    // minimizes.
+    LineSearchFunction* function = nullptr;
+  };
+
+  // Result of the line search.
+  struct Summary {
+    bool success = false;
+    FunctionSample optimal_point;
+    int num_function_evaluations = 0;
+    int num_gradient_evaluations = 0;
+    int num_iterations = 0;
+    // Cumulative time spent evaluating the value of the cost function across
+    // all iterations.
+    double cost_evaluation_time_in_seconds = 0.0;
+    // Cumulative time spent evaluating the gradient of the cost function across
+    // all iterations.
+    double gradient_evaluation_time_in_seconds = 0.0;
+    // Cumulative time spent minimizing the interpolating polynomial to compute
+    // the next candidate step size across all iterations.
+    double polynomial_minimization_time_in_seconds = 0.0;
+    double total_time_in_seconds = 0.0;
+    std::string error;
+  };
+
+  explicit LineSearch(const LineSearch::Options& options);
+  virtual ~LineSearch() {}
+
+  static LineSearch* Create(const LineSearchType line_search_type,
+                            const LineSearch::Options& options,
+                            std::string* error);
+
+  // Perform the line search.
+  //
+  // step_size_estimate must be a positive number.
+  //
+  // initial_cost and initial_gradient are the value and gradient of
+  // the function at zero.
+  // summary must not be null and will contain the result of the line
+  // search.
+  //
+  // Summary::success is true if a non-zero step size is found.
+  void Search(double step_size_estimate,
+              double initial_cost,
+              double initial_gradient,
+              Summary* summary) const;
+  double InterpolatingPolynomialMinimizingStepSize(
+      const LineSearchInterpolationType& interpolation_type,
+      const FunctionSample& lowerbound_sample,
+      const FunctionSample& previous_sample,
+      const FunctionSample& current_sample,
+      const double min_step_size,
+      const double max_step_size) const;
+
+ protected:
+  const LineSearch::Options& options() const { return options_; }
+
+ private:
+  virtual void DoSearch(double step_size_estimate,
+                        double initial_cost,
+                        double initial_gradient,
+                        Summary* summary) const = 0;
+
+ private:
+  LineSearch::Options options_;
+};
+
+// An object used by the line search to access the function values
+// and gradient of the one dimensional function being optimized.
+//
+// In practice, this object provides access to the objective
+// function value and the directional derivative of the underlying
+// optimization problem along a specific search direction.
+class LineSearchFunction {
+ public:
+  explicit LineSearchFunction(Evaluator* evaluator);
+  void Init(const Vector& position, const Vector& direction);
+
+  // Evaluate the line search objective
+  //
+  //   f(x) = p(position + x * direction)
+  //
+  // Where, p is the objective function of the general optimization
+  // problem.
+  //
+  // evaluate_gradient controls whether the gradient will be evaluated
+  // or not.
+  //
+  // On return, output->*_is_valid indicate whether the
+  // corresponding fields have numerically valid values or not.
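+  //
+  // When evaluate_gradient is true, output->gradient is the directional
+  // derivative of p along direction at (position + x * direction), i.e.
+  // d f / d x.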
+  void Evaluate(double x, bool evaluate_gradient, FunctionSample* output);
+
+  double DirectionInfinityNorm() const;
+
+  // Resets the start point for the results of TimeStatistics() to now.
+  void ResetTimeStatistics();
+  void TimeStatistics(double* cost_evaluation_time_in_seconds,
+                      double* gradient_evaluation_time_in_seconds) const;
+  const Vector& position() const { return position_; }
+  const Vector& direction() const { return direction_; }
+
+ private:
+  Evaluator* evaluator_;
+  Vector position_;
+  Vector direction_;
+
+  // scaled_direction = x * direction_;
+  Vector scaled_direction_;
+
+  // We may not exclusively own the evaluator (e.g. in the Trust Region
+  // minimizer), hence we need to save the initial evaluation durations for the
+  // value & gradient to accurately determine the duration of the evaluations
+  // we invoked.  These are reset by a call to ResetTimeStatistics().
+  double initial_evaluator_residual_time_in_seconds;
+  double initial_evaluator_jacobian_time_in_seconds;
+};
+
+// Backtracking and interpolation based Armijo line search. This
+// implementation is based on the Armijo line search that ships in the
+// minFunc package by Mark Schmidt.
+//
+// For more details: http://www.di.ens.fr/~mschmidt/Software/minFunc.html
+class ArmijoLineSearch : public LineSearch {
+ public:
+  explicit ArmijoLineSearch(const LineSearch::Options& options);
+  virtual ~ArmijoLineSearch() {}
+
+ private:
+  virtual void DoSearch(double step_size_estimate,
+                        double initial_cost,
+                        double initial_gradient,
+                        Summary* summary) const;
+};
+
+// Bracketing / Zoom Strong Wolfe condition line search.  This implementation
+// is based on the pseudo-code algorithm presented in Nocedal & Wright [1]
+// (p60-61) with inspiration from the WolfeLineSearch which ships with the
+// minFunc package by Mark Schmidt [2].
+//
+// [1] Nocedal J., Wright S., Numerical Optimization, 2nd Ed., Springer, 1999.
+// [2] http://www.di.ens.fr/~mschmidt/Software/minFunc.html.
+class WolfeLineSearch : public LineSearch {
+ public:
+  explicit WolfeLineSearch(const LineSearch::Options& options);
+  virtual ~WolfeLineSearch() {}
+
+  // Returns true iff either a valid point or a valid bracket is found.
+  bool BracketingPhase(const FunctionSample& initial_position,
+                       const double step_size_estimate,
+                       FunctionSample* bracket_low,
+                       FunctionSample* bracket_high,
+                       bool* perform_zoom_search,
+                       Summary* summary) const;
+  // Returns true iff final_line_sample satisfies strong Wolfe conditions.
+  bool ZoomPhase(const FunctionSample& initial_position,
+                 FunctionSample bracket_low,
+                 FunctionSample bracket_high,
+                 FunctionSample* solution,
+                 Summary* summary) const;
+
+ private:
+  virtual void DoSearch(double step_size_estimate,
+                        double initial_cost,
+                        double initial_gradient,
+                        Summary* summary) const;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_LINE_SEARCH_H_
diff --git a/internal/ceres/line_search_direction.cc b/internal/ceres/line_search_direction.cc
new file mode 100644
index 0000000..1f9d205
--- /dev/null
+++ b/internal/ceres/line_search_direction.cc
@@ -0,0 +1,372 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/line_search_direction.h"
+#include "ceres/line_search_minimizer.h"
+#include "ceres/low_rank_inverse_hessian.h"
+#include "ceres/internal/eigen.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+class SteepestDescent : public LineSearchDirection {
+ public:
+  virtual ~SteepestDescent() {}
+  bool NextDirection(const LineSearchMinimizer::State& previous,
+                     const LineSearchMinimizer::State& current,
+                     Vector* search_direction) {
+    *search_direction = -current.gradient;
+    return true;
+  }
+};
+
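+// Nonlinear conjugate gradient directions of the form:
+//
+//   d_{k+1} = -g_{k+1} + beta * d_k
+//
+// where g is the gradient, d the search direction, and beta is computed
+// according to the configured variant:
+//
+//   Fletcher-Reeves:   beta = g_{k+1}' g_{k+1} / (g_k' g_k)
+//   Polak-Ribiere:     beta = g_{k+1}' (g_{k+1} - g_k) / (g_k' g_k)
+//   Hestenes-Stiefel:  beta = g_{k+1}' (g_{k+1} - g_k) / (d_k' (g_{k+1} - g_k))
+//
+// The direction is reset to steepest descent whenever the resulting direction
+// ceases to be a sufficient descent direction.
+//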
+class NonlinearConjugateGradient : public LineSearchDirection {
+ public:
+  NonlinearConjugateGradient(const NonlinearConjugateGradientType type,
+                             const double function_tolerance)
+      : type_(type),
+        function_tolerance_(function_tolerance) {
+  }
+
+  bool NextDirection(const LineSearchMinimizer::State& previous,
+                     const LineSearchMinimizer::State& current,
+                     Vector* search_direction) {
+    double beta = 0.0;
+    Vector gradient_change;
+    switch (type_) {
+      case FLETCHER_REEVES:
+        beta = current.gradient_squared_norm / previous.gradient_squared_norm;
+        break;
+      case POLAK_RIBIERE:
+        gradient_change = current.gradient - previous.gradient;
+        beta = (current.gradient.dot(gradient_change) /
+                previous.gradient_squared_norm);
+        break;
+      case HESTENES_STIEFEL:
+        gradient_change = current.gradient - previous.gradient;
+        beta =  (current.gradient.dot(gradient_change) /
+                 previous.search_direction.dot(gradient_change));
+        break;
+      default:
+        LOG(FATAL) << "Unknown nonlinear conjugate gradient type: " << type_;
+    }
+
+    *search_direction =  -current.gradient + beta * previous.search_direction;
+    const double directional_derivative =
+        current.gradient.dot(*search_direction);
+    if (directional_derivative > -function_tolerance_) {
+      LOG(WARNING) << "Restarting non-linear conjugate gradients: "
+                   << directional_derivative;
+      *search_direction = -current.gradient;
+    }
+
+    return true;
+  }
+
+ private:
+  const NonlinearConjugateGradientType type_;
+  const double function_tolerance_;
+};
+
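+// Limited-memory BFGS direction.  Rather than forming the dense inverse
+// Hessian approximation explicitly, LowRankInverseHessian stores at most
+// max_lbfgs_rank recent (delta_x, delta_gradient) correction pairs and
+// applies the implicitly represented approximation to the current gradient
+// to produce the search direction.
+//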
+class LBFGS : public LineSearchDirection {
+ public:
+  LBFGS(const int num_parameters,
+        const int max_lbfgs_rank,
+        const bool use_approximate_eigenvalue_bfgs_scaling)
+      : low_rank_inverse_hessian_(num_parameters,
+                                  max_lbfgs_rank,
+                                  use_approximate_eigenvalue_bfgs_scaling),
+        is_positive_definite_(true) {}
+
+  virtual ~LBFGS() {}
+
+  bool NextDirection(const LineSearchMinimizer::State& previous,
+                     const LineSearchMinimizer::State& current,
+                     Vector* search_direction) {
+    CHECK(is_positive_definite_)
+        << "Ceres bug: NextDirection() called on L-BFGS after inverse Hessian "
+        << "approximation has become indefinite, please contact the "
+        << "developers!";
+
+    low_rank_inverse_hessian_.Update(
+        previous.search_direction * previous.step_size,
+        current.gradient - previous.gradient);
+
+    search_direction->setZero();
+    low_rank_inverse_hessian_.RightMultiply(current.gradient.data(),
+                                            search_direction->data());
+    *search_direction *= -1.0;
+
+    if (search_direction->dot(current.gradient) >= 0.0) {
+      LOG(WARNING) << "Numerical failure in L-BFGS update: inverse Hessian "
+                   << "approximation is not positive definite, and thus "
+                   << "initial gradient for search direction is positive: "
+                   << search_direction->dot(current.gradient);
+      is_positive_definite_ = false;
+      return false;
+    }
+
+    return true;
+  }
+
+ private:
+  LowRankInverseHessian low_rank_inverse_hessian_;
+  bool is_positive_definite_;
+};
+
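+// Dense BFGS direction.  Maintains a full num_parameters x num_parameters
+// inverse Hessian approximation, updated from the observed steps and gradient
+// changes, so it is only practical for problems with a modest number of
+// parameters (see the size warning in the constructor).
+//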
+class BFGS : public LineSearchDirection {
+ public:
+  BFGS(const int num_parameters,
+       const bool use_approximate_eigenvalue_scaling)
+      : num_parameters_(num_parameters),
+        use_approximate_eigenvalue_scaling_(use_approximate_eigenvalue_scaling),
+        initialized_(false),
+        is_positive_definite_(true) {
+    LOG_IF(WARNING, num_parameters_ >= 1e3)
+        << "BFGS line search being created with: " << num_parameters_
+        << " parameters, this will allocate a dense approximate inverse Hessian"
+        << " of size: " << num_parameters_ << " x " << num_parameters_
+        << ", consider using the L-BFGS memory-efficient line search direction "
+        << "instead.";
+    // Construct inverse_hessian_ after logging warning about size s.t. if the
+    // allocation crashes us, the log will highlight what the issue likely was.
+    inverse_hessian_ = Matrix::Identity(num_parameters, num_parameters);
+  }
+
+  virtual ~BFGS() {}
+
+  bool NextDirection(const LineSearchMinimizer::State& previous,
+                     const LineSearchMinimizer::State& current,
+                     Vector* search_direction) {
+    CHECK(is_positive_definite_)
+        << "Ceres bug: NextDirection() called on BFGS after inverse Hessian "
+        << "approximation has become indefinite, please contact the "
+        << "developers!";
+
+    const Vector delta_x = previous.search_direction * previous.step_size;
+    const Vector delta_gradient = current.gradient - previous.gradient;
+    const double delta_x_dot_delta_gradient = delta_x.dot(delta_gradient);
+
+    // The (L)BFGS algorithm explicitly requires that the secant equation:
+    //
+    //   B_{k+1} * s_k = y_k
+    //
+    // Is satisfied at each iteration, where B_{k+1} is the approximated
+    // Hessian at the k+1-th iteration, s_k = (x_{k+1} - x_{k}) and
+    // y_k = (grad_{k+1} - grad_{k}). As the approximated Hessian must be
+    // positive definite, this is equivalent to the condition:
+    //
+    //   s_k^T * y_k > 0     [s_k^T * B_{k+1} * s_k = s_k^T * y_k > 0]
+    //
+    // This condition would always be satisfied if the function were strictly
+    // convex; alternatively, it is always satisfied provided that a Wolfe line
+    // search is used (even if the function is not strictly convex).  See [1]
+    // (p138) for a proof.
+    //
+    // Although Ceres will always use a Wolfe line search when using (L)BFGS,
+    // practical implementation considerations mean that the line search
+    // may return a point that satisfies only the Armijo condition, and thus
+    // could violate the Secant equation.  As such, we will only use a step
+    // to update the Hessian approximation if:
+    //
+    //   s_k^T * y_k > tolerance
+    //
+    // It is important that tolerance is very small (and >=0), as otherwise we
+    // might skip the update too often and fail to capture important curvature
+    // information in the Hessian.  For example going from 1e-10 -> 1e-14
+    // improves the NIST benchmark score from 43/54 to 53/54.
+    //
+    // [1] Nocedal J, Wright S, Numerical Optimization, 2nd Ed. Springer, 1999.
+    //
+    // TODO(alexs.mac): Consider using Damped BFGS update instead of
+    // skipping update.
+    const double kBFGSSecantConditionHessianUpdateTolerance = 1e-14;
+    if (delta_x_dot_delta_gradient <=
+        kBFGSSecantConditionHessianUpdateTolerance) {
+      VLOG(2) << "Skipping BFGS Update, delta_x_dot_delta_gradient too "
+              << "small: " << delta_x_dot_delta_gradient << ", tolerance: "
+              << kBFGSSecantConditionHessianUpdateTolerance
+              << " (Secant condition).";
+    } else {
+      // Update dense inverse Hessian approximation.
+
+      if (!initialized_ && use_approximate_eigenvalue_scaling_) {
+        // Rescale the initial inverse Hessian approximation (H_0) to be
+        // iteratively updated so that it is of similar 'size' to the true
+        // inverse Hessian at the start point.  As shown in [1]:
+        //
+        //   \gamma = (delta_gradient_{0}' * delta_x_{0}) /
+        //            (delta_gradient_{0}' * delta_gradient_{0})
+        //
+        // Satisfies:
+        //
+        //   (1 / \lambda_m) <= \gamma <= (1 / \lambda_1)
+        //
+        // Where \lambda_1 & \lambda_m are the smallest and largest eigenvalues
+        // of the true initial Hessian (not the inverse) respectively. Thus,
+        // \gamma is an approximate eigenvalue of the true inverse Hessian, and
+        // choosing: H_0 = I * \gamma will yield a starting point that has a
+        // similar scale to the true inverse Hessian.  This technique is widely
+        // reported to often improve convergence; however, this is not
+        // universally true, particularly if there are errors in the initial
+        // gradients, or if there are significant differences in the sensitivity
+        // of the problem to the parameters (i.e. the range of the magnitudes of
+        // the components of the gradient is large).
+        //
+        // The origin of this rescaling trick is somewhat unclear; the
+        // earliest reference appears to be Oren [1], however it is widely
+        // discussed without specific attribution in various texts including
+        // [2] (p143).
+        //
+        // [1] Oren S.S., Self-scaling variable metric (SSVM) algorithms
+        //     Part II: Implementation and experiments, Management Science,
+        //     20(5), 863-874, 1974.
+        // [2] Nocedal J., Wright S., Numerical Optimization, Springer, 1999.
+        const double approximate_eigenvalue_scale =
+            delta_x_dot_delta_gradient / delta_gradient.dot(delta_gradient);
+        inverse_hessian_ *= approximate_eigenvalue_scale;
+
+        VLOG(4) << "Applying approximate_eigenvalue_scale: "
+                << approximate_eigenvalue_scale << " to initial inverse "
+                << "Hessian approximation.";
+      }
+      initialized_ = true;
+
+      // Efficient O(num_parameters^2) BFGS update [2].
+      //
+      // Starting from dense BFGS update detailed in Nocedal [2] p140/177 and
+      // using: y_k = delta_gradient, s_k = delta_x:
+      //
+      //   \rho_k = 1.0 / (s_k' * y_k)
+      //   V_k = I - \rho_k * y_k * s_k'
+      //   H_k = (V_k' * H_{k-1} * V_k) + (\rho_k * s_k * s_k')
+      //
+      // This update involves matrix-matrix products which are naively O(N^3);
+      // however, we can exploit our knowledge that H_k is positive definite
+      // and thus by defn. symmetric to reduce the cost of the update:
+      //
+      // Expanding the update above yields:
+      //
+      //   H_k = H_{k-1} +
+      //         \rho_k * ( (1.0 + \rho_k * y_k' * H_k * y_k) * s_k * s_k' -
+      //                    (s_k * y_k' * H_k + H_k * y_k * s_k') )
+      //
+      // Using: A = (s_k * y_k' * H_k), and the knowledge that H_k = H_k', the
+      // last term simplifies to (A + A'). Note that although A is not symmetric
+      // (A + A') is symmetric. For ease of construction we also define
+      // B = (1 + \rho_k * y_k' * H_k * y_k) * s_k * s_k', which is by defn
+      // symmetric due to construction from: s_k * s_k'.
+      //
+      // Now we can write the BFGS update as:
+      //
+      //   H_k = H_{k-1} + \rho_k * (B - (A + A'))
+
+      // For efficiency, as H_k is by defn. symmetric, we will only maintain the
+      // *lower* triangle of H_k (and all intermediary terms).
+
+      const double rho_k = 1.0 / delta_x_dot_delta_gradient;
+
+      // Calculate: A = s_k * y_k' * H_k
+      Matrix A = delta_x * (delta_gradient.transpose() *
+                            inverse_hessian_.selfadjointView<Eigen::Lower>());
+
+      // Calculate scalar: (1 + \rho_k * y_k' * H_k * y_k)
+      const double delta_x_times_delta_x_transpose_scale_factor =
+          (1.0 + (rho_k * delta_gradient.transpose() *
+                  inverse_hessian_.selfadjointView<Eigen::Lower>() *
+                  delta_gradient));
+      // Calculate: B = (1 + \rho_k * y_k' * H_k * y_k) * s_k * s_k'
+      Matrix B = Matrix::Zero(num_parameters_, num_parameters_);
+      B.selfadjointView<Eigen::Lower>().
+          rankUpdate(delta_x, delta_x_times_delta_x_transpose_scale_factor);
+
+      // Finally, update inverse Hessian approximation according to:
+      // H_k = H_{k-1} + \rho_k * (B - (A + A')).  Note that (A + A') is
+      // symmetric, even though A is not.
+      inverse_hessian_.triangularView<Eigen::Lower>() +=
+          rho_k * (B - A - A.transpose());
+    }
+
+    *search_direction =
+        inverse_hessian_.selfadjointView<Eigen::Lower>() *
+        (-1.0 * current.gradient);
+
+    if (search_direction->dot(current.gradient) >= 0.0) {
+      LOG(WARNING) << "Numerical failure in BFGS update: inverse Hessian "
+                   << "approximation is not positive definite, and thus "
+                   << "initial gradient for search direction is positive: "
+                   << search_direction->dot(current.gradient);
+      is_positive_definite_ = false;
+      return false;
+    }
+
+    return true;
+  }
+
+ private:
+  const int num_parameters_;
+  const bool use_approximate_eigenvalue_scaling_;
+  Matrix inverse_hessian_;
+  bool initialized_;
+  bool is_positive_definite_;
+};
+
+LineSearchDirection*
+LineSearchDirection::Create(const LineSearchDirection::Options& options) {
+  if (options.type == STEEPEST_DESCENT) {
+    return new SteepestDescent;
+  }
+
+  if (options.type == NONLINEAR_CONJUGATE_GRADIENT) {
+    return new NonlinearConjugateGradient(
+        options.nonlinear_conjugate_gradient_type,
+        options.function_tolerance);
+  }
+
+  if (options.type == ceres::LBFGS) {
+    return new ceres::internal::LBFGS(
+        options.num_parameters,
+        options.max_lbfgs_rank,
+        options.use_approximate_eigenvalue_bfgs_scaling);
+  }
+
+  if (options.type == ceres::BFGS) {
+    return new ceres::internal::BFGS(
+        options.num_parameters,
+        options.use_approximate_eigenvalue_bfgs_scaling);
+  }
+
+  LOG(ERROR) << "Unknown line search direction type: " << options.type;
+  return NULL;
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/line_search_direction.h b/internal/ceres/line_search_direction.h
new file mode 100644
index 0000000..467578d
--- /dev/null
+++ b/internal/ceres/line_search_direction.h
@@ -0,0 +1,72 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_LINE_SEARCH_DIRECTION_H_
+#define CERES_INTERNAL_LINE_SEARCH_DIRECTION_H_
+
+#include "ceres/internal/eigen.h"
+#include "ceres/line_search_minimizer.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
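+// Computes the search direction used by the line search minimizer at each
+// iteration (steepest descent, nonlinear conjugate gradient, L-BFGS or BFGS).
+//
+// A minimal usage sketch (illustrative only; num_parameters, the previous and
+// current states and search_direction are assumed to be provided by the
+// caller, as in LineSearchMinimizer):
+//
+//   LineSearchDirection::Options options;
+//   options.num_parameters = num_parameters;
+//   options.type = ceres::LBFGS;
+//   std::unique_ptr<LineSearchDirection> direction(
+//       LineSearchDirection::Create(options));
+//   if (!direction->NextDirection(previous_state, current_state,
+//                                 &search_direction)) {
+//     // Restart the direction, or fall back to steepest descent.
+//   }
+//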
+class LineSearchDirection {
+ public:
+  struct Options {
+    Options()
+        : num_parameters(0),
+          type(LBFGS),
+          nonlinear_conjugate_gradient_type(FLETCHER_REEVES),
+          function_tolerance(1e-12),
+          max_lbfgs_rank(20),
+          use_approximate_eigenvalue_bfgs_scaling(true) {
+    }
+
+    int num_parameters;
+    LineSearchDirectionType type;
+    NonlinearConjugateGradientType nonlinear_conjugate_gradient_type;
+    double function_tolerance;
+    int max_lbfgs_rank;
+    bool use_approximate_eigenvalue_bfgs_scaling;
+  };
+
+  static LineSearchDirection* Create(const Options& options);
+
+  virtual ~LineSearchDirection() {}
+  virtual bool NextDirection(const LineSearchMinimizer::State& previous,
+                             const LineSearchMinimizer::State& current,
+                             Vector* search_direction) = 0;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_LINE_SEARCH_DIRECTION_H_
diff --git a/internal/ceres/line_search_minimizer.cc b/internal/ceres/line_search_minimizer.cc
new file mode 100644
index 0000000..ac0a192
--- /dev/null
+++ b/internal/ceres/line_search_minimizer.cc
@@ -0,0 +1,448 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Generic loop for line search based optimization algorithms.
+//
+// This is primarily inspired by the minFunc package written by Mark
+// Schmidt.
+//
+// http://www.di.ens.fr/~mschmidt/Software/minFunc.html
+//
+// For details on the theory and implementation see "Numerical
+// Optimization" by Nocedal & Wright.
+
+#include "ceres/line_search_minimizer.h"
+
+#include <algorithm>
+#include <cstdlib>
+#include <cmath>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "Eigen/Dense"
+#include "ceres/array_utils.h"
+#include "ceres/evaluator.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/port.h"
+#include "ceres/line_search.h"
+#include "ceres/line_search_direction.h"
+#include "ceres/stringprintf.h"
+#include "ceres/types.h"
+#include "ceres/wall_time.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+namespace {
+
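+// Updates state->gradient_squared_norm and state->gradient_max_norm with the
+// norms of the projected gradient step x - Plus(x, -gradient).  For purely
+// Euclidean parameter blocks this is exactly the gradient; in the presence of
+// local parameterizations it measures the change in x induced by taking a
+// step of -gradient through Plus(), which is the quantity used by the
+// convergence tests in Minimize().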
+bool EvaluateGradientNorms(Evaluator* evaluator,
+                           const Vector& x,
+                           LineSearchMinimizer::State* state,
+                           std::string* message) {
+  Vector negative_gradient = -state->gradient;
+  Vector projected_gradient_step(x.size());
+  if (!evaluator->Plus(
+          x.data(), negative_gradient.data(), projected_gradient_step.data())) {
+    *message = "projected_gradient_step = Plus(x, -gradient) failed.";
+    return false;
+  }
+
+  state->gradient_squared_norm = (x - projected_gradient_step).squaredNorm();
+  state->gradient_max_norm =
+      (x - projected_gradient_step).lpNorm<Eigen::Infinity>();
+  return true;
+}
+
+}  // namespace
+
+void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
+                                   double* parameters,
+                                   Solver::Summary* summary) {
+  const bool is_not_silent = !options.is_silent;
+  double start_time = WallTimeInSeconds();
+  double iteration_start_time =  start_time;
+
+  CHECK(options.evaluator != nullptr);
+  Evaluator* evaluator = options.evaluator.get();
+  const int num_parameters = evaluator->NumParameters();
+  const int num_effective_parameters = evaluator->NumEffectiveParameters();
+
+  summary->termination_type = NO_CONVERGENCE;
+  summary->num_successful_steps = 0;
+  summary->num_unsuccessful_steps = 0;
+
+  VectorRef x(parameters, num_parameters);
+
+  State current_state(num_parameters, num_effective_parameters);
+  State previous_state(num_parameters, num_effective_parameters);
+
+  IterationSummary iteration_summary;
+  iteration_summary.iteration = 0;
+  iteration_summary.step_is_valid = false;
+  iteration_summary.step_is_successful = false;
+  iteration_summary.cost_change = 0.0;
+  iteration_summary.gradient_max_norm = 0.0;
+  iteration_summary.gradient_norm = 0.0;
+  iteration_summary.step_norm = 0.0;
+  iteration_summary.linear_solver_iterations = 0;
+  iteration_summary.step_solver_time_in_seconds = 0;
+
+  // Do initial cost and gradient evaluation.
+  if (!evaluator->Evaluate(x.data(),
+                           &(current_state.cost),
+                           NULL,
+                           current_state.gradient.data(),
+                           NULL)) {
+    summary->termination_type = FAILURE;
+    summary->message = "Initial cost and jacobian evaluation failed.";
+    LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
+    return;
+  }
+
+  if (!EvaluateGradientNorms(evaluator, x, &current_state, &summary->message)) {
+    summary->termination_type = FAILURE;
+    summary->message = "Initial cost and jacobian evaluation failed. "
+        "More details: " + summary->message;
+    LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
+    return;
+  }
+
+  summary->initial_cost = current_state.cost + summary->fixed_cost;
+  iteration_summary.cost = current_state.cost + summary->fixed_cost;
+
+  iteration_summary.gradient_norm = sqrt(current_state.gradient_squared_norm);
+  iteration_summary.gradient_max_norm = current_state.gradient_max_norm;
+  if (iteration_summary.gradient_max_norm <= options.gradient_tolerance) {
+    summary->message = StringPrintf("Gradient tolerance reached. "
+                                    "Gradient max norm: %e <= %e",
+                                    iteration_summary.gradient_max_norm,
+                                    options.gradient_tolerance);
+    summary->termination_type = CONVERGENCE;
+    VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
+    return;
+  }
+
+  iteration_summary.iteration_time_in_seconds =
+      WallTimeInSeconds() - iteration_start_time;
+  iteration_summary.cumulative_time_in_seconds =
+      WallTimeInSeconds() - start_time
+      + summary->preprocessor_time_in_seconds;
+  summary->iterations.push_back(iteration_summary);
+
+  LineSearchDirection::Options line_search_direction_options;
+  line_search_direction_options.num_parameters = num_effective_parameters;
+  line_search_direction_options.type = options.line_search_direction_type;
+  line_search_direction_options.nonlinear_conjugate_gradient_type =
+      options.nonlinear_conjugate_gradient_type;
+  line_search_direction_options.max_lbfgs_rank = options.max_lbfgs_rank;
+  line_search_direction_options.use_approximate_eigenvalue_bfgs_scaling =
+      options.use_approximate_eigenvalue_bfgs_scaling;
+  std::unique_ptr<LineSearchDirection> line_search_direction(
+      LineSearchDirection::Create(line_search_direction_options));
+
+  LineSearchFunction line_search_function(evaluator);
+
+  LineSearch::Options line_search_options;
+  line_search_options.interpolation_type =
+      options.line_search_interpolation_type;
+  line_search_options.min_step_size = options.min_line_search_step_size;
+  line_search_options.sufficient_decrease =
+      options.line_search_sufficient_function_decrease;
+  line_search_options.max_step_contraction =
+      options.max_line_search_step_contraction;
+  line_search_options.min_step_contraction =
+      options.min_line_search_step_contraction;
+  line_search_options.max_num_iterations =
+      options.max_num_line_search_step_size_iterations;
+  line_search_options.sufficient_curvature_decrease =
+      options.line_search_sufficient_curvature_decrease;
+  line_search_options.max_step_expansion =
+      options.max_line_search_step_expansion;
+  line_search_options.is_silent = options.is_silent;
+  line_search_options.function = &line_search_function;
+
+  std::unique_ptr<LineSearch>
+      line_search(LineSearch::Create(options.line_search_type,
+                                     line_search_options,
+                                     &summary->message));
+  if (line_search.get() == NULL) {
+    summary->termination_type = FAILURE;
+    LOG_IF(ERROR, is_not_silent) << "Terminating: " << summary->message;
+    return;
+  }
+
+  LineSearch::Summary line_search_summary;
+  int num_line_search_direction_restarts = 0;
+
+  while (true) {
+    if (!RunCallbacks(options, iteration_summary, summary)) {
+      break;
+    }
+
+    iteration_start_time = WallTimeInSeconds();
+    if (iteration_summary.iteration >= options.max_num_iterations) {
+      summary->message = "Maximum number of iterations reached.";
+      summary->termination_type = NO_CONVERGENCE;
+      VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
+      break;
+    }
+
+    const double total_solver_time = iteration_start_time - start_time +
+        summary->preprocessor_time_in_seconds;
+    if (total_solver_time >= options.max_solver_time_in_seconds) {
+      summary->message = "Maximum solver time reached.";
+      summary->termination_type = NO_CONVERGENCE;
+      VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
+      break;
+    }
+
+    iteration_summary = IterationSummary();
+    iteration_summary.iteration = summary->iterations.back().iteration + 1;
+    iteration_summary.step_is_valid = false;
+    iteration_summary.step_is_successful = false;
+
+    bool line_search_status = true;
+    if (iteration_summary.iteration == 1) {
+      current_state.search_direction = -current_state.gradient;
+    } else {
+      line_search_status = line_search_direction->NextDirection(
+          previous_state,
+          current_state,
+          &current_state.search_direction);
+    }
+
+    if (!line_search_status &&
+        num_line_search_direction_restarts >=
+        options.max_num_line_search_direction_restarts) {
+      // Line search direction failed to generate a new direction, and we
+      // have already reached our specified maximum number of restarts,
+      // terminate optimization.
+      summary->message =
+          StringPrintf("Line search direction failure: specified "
+                       "max_num_line_search_direction_restarts: %d reached.",
+                       options.max_num_line_search_direction_restarts);
+      summary->termination_type = FAILURE;
+      LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
+      break;
+    } else if (!line_search_status) {
+      // Restart line search direction with gradient descent on first iteration
+      // as we have not yet reached our maximum number of restarts.
+      CHECK_LT(num_line_search_direction_restarts,
+               options.max_num_line_search_direction_restarts);
+
+      ++num_line_search_direction_restarts;
+      LOG_IF(WARNING, is_not_silent)
+          << "Line search direction algorithm: "
+          << LineSearchDirectionTypeToString(
+              options.line_search_direction_type)
+          << ", failed to produce a valid new direction at "
+          << "iteration: " << iteration_summary.iteration
+          << ". Restarting, number of restarts: "
+          << num_line_search_direction_restarts << " / "
+          << options.max_num_line_search_direction_restarts
+          << " [max].";
+      line_search_direction.reset(
+          LineSearchDirection::Create(line_search_direction_options));
+      current_state.search_direction = -current_state.gradient;
+    }
+
+    line_search_function.Init(x, current_state.search_direction);
+    current_state.directional_derivative =
+        current_state.gradient.dot(current_state.search_direction);
+
+    // TODO(sameeragarwal): Refactor this into its own object and add
+    // explanations for the various choices.
+    //
+    // Note that we use !line_search_status to ensure that we treat cases when
+    // we restarted the line search direction equivalently to the first
+    // iteration.
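+    //
+    // On the first iteration (and whenever the line search direction has just
+    // been restarted) the initial step is min(1.0, 1.0 / gradient_max_norm).
+    // Otherwise it is min(1.0, 2.0 * (current cost - previous cost) /
+    // directional_derivative), i.e. the minimizer of the quadratic
+    // interpolating the previous cost, the current cost and the current
+    // directional derivative, as suggested for choosing the initial step
+    // length in Nocedal & Wright.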
+    const double initial_step_size =
+        (iteration_summary.iteration == 1 || !line_search_status)
+        ? std::min(1.0, 1.0 / current_state.gradient_max_norm)
+        : std::min(1.0, 2.0 * (current_state.cost - previous_state.cost) /
+                   current_state.directional_derivative);
+    // By definition, we should only ever go forwards along the specified search
+    // direction in a line search; the most likely cause for this being violated
+    // would be a numerical failure in the line search direction calculation.
+    if (initial_step_size < 0.0) {
+      summary->message =
+          StringPrintf("Numerical failure in line search, initial_step_size is "
+                       "negative: %.5e, directional_derivative: %.5e, "
+                       "(current_cost - previous_cost): %.5e",
+                       initial_step_size, current_state.directional_derivative,
+                       (current_state.cost - previous_state.cost));
+      summary->termination_type = FAILURE;
+      LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
+      break;
+    }
+
+    line_search->Search(initial_step_size,
+                        current_state.cost,
+                        current_state.directional_derivative,
+                        &line_search_summary);
+    if (!line_search_summary.success) {
+      summary->message =
+          StringPrintf("Numerical failure in line search, failed to find "
+                       "a valid step size, (did not run out of iterations) "
+                       "using initial_step_size: %.5e, initial_cost: %.5e, "
+                       "initial_gradient: %.5e.",
+                       initial_step_size, current_state.cost,
+                       current_state.directional_derivative);
+      LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
+      summary->termination_type = FAILURE;
+      break;
+    }
+
+    const FunctionSample& optimal_point = line_search_summary.optimal_point;
+    CHECK(optimal_point.vector_x_is_valid)
+        << "Congratulations, you found a bug in Ceres. Please report it.";
+    current_state.step_size = optimal_point.x;
+    previous_state = current_state;
+    iteration_summary.step_solver_time_in_seconds =
+        WallTimeInSeconds() - iteration_start_time;
+
+    if (optimal_point.vector_gradient_is_valid) {
+      current_state.cost = optimal_point.value;
+      current_state.gradient = optimal_point.vector_gradient;
+    } else {
+      Evaluator::EvaluateOptions evaluate_options;
+      evaluate_options.new_evaluation_point = false;
+      if (!evaluator->Evaluate(evaluate_options,
+                               optimal_point.vector_x.data(),
+                               &(current_state.cost),
+                               NULL,
+                               current_state.gradient.data(),
+                               NULL)) {
+        summary->termination_type = FAILURE;
+        summary->message = "Cost and jacobian evaluation failed.";
+        LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
+        return;
+      }
+    }
+
+    if (!EvaluateGradientNorms(evaluator,
+                               optimal_point.vector_x,
+                               &current_state,
+                               &summary->message)) {
+      summary->termination_type = FAILURE;
+      summary->message =
+          "Step failed to evaluate. This should not happen as the step was "
+          "valid when it was selected by the line search. More details: " +
+          summary->message;
+      LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
+      break;
+    }
+
+    // Compute the norm of the step in the ambient space.
+    iteration_summary.step_norm = (optimal_point.vector_x - x).norm();
+    const double x_norm = x.norm();
+    x = optimal_point.vector_x;
+
+    iteration_summary.gradient_max_norm = current_state.gradient_max_norm;
+    iteration_summary.gradient_norm = sqrt(current_state.gradient_squared_norm);
+    iteration_summary.cost_change = previous_state.cost - current_state.cost;
+    iteration_summary.cost = current_state.cost + summary->fixed_cost;
+
+    iteration_summary.step_is_valid = true;
+    iteration_summary.step_is_successful = true;
+    iteration_summary.step_size =  current_state.step_size;
+    iteration_summary.line_search_function_evaluations =
+        line_search_summary.num_function_evaluations;
+    iteration_summary.line_search_gradient_evaluations =
+        line_search_summary.num_gradient_evaluations;
+    iteration_summary.line_search_iterations =
+        line_search_summary.num_iterations;
+    iteration_summary.iteration_time_in_seconds =
+        WallTimeInSeconds() - iteration_start_time;
+    iteration_summary.cumulative_time_in_seconds =
+        WallTimeInSeconds() - start_time
+        + summary->preprocessor_time_in_seconds;
+    summary->iterations.push_back(iteration_summary);
+
+    // Iterations inside the line search algorithm are considered
+    // 'steps' in the broader context, to distinguish these inner
+    // iterations from the outer iterations of the line search
+    // minimizer. The number of line search steps is the total number
+    // of inner line search iterations (or steps) across the entire
+    // minimization.
+    summary->num_line_search_steps +=  line_search_summary.num_iterations;
+    summary->line_search_cost_evaluation_time_in_seconds +=
+        line_search_summary.cost_evaluation_time_in_seconds;
+    summary->line_search_gradient_evaluation_time_in_seconds +=
+        line_search_summary.gradient_evaluation_time_in_seconds;
+    summary->line_search_polynomial_minimization_time_in_seconds +=
+        line_search_summary.polynomial_minimization_time_in_seconds;
+    summary->line_search_total_time_in_seconds +=
+        line_search_summary.total_time_in_seconds;
+    ++summary->num_successful_steps;
+
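+    // Convergence tests: relative step size against parameter_tolerance, max
+    // norm of the projected gradient against gradient_tolerance, and relative
+    // change in cost against function_tolerance.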
+    const double step_size_tolerance = options.parameter_tolerance *
+                                       (x_norm + options.parameter_tolerance);
+    if (iteration_summary.step_norm <= step_size_tolerance) {
+      summary->message =
+          StringPrintf("Parameter tolerance reached. "
+                       "Relative step_norm: %e <= %e.",
+                       (iteration_summary.step_norm /
+                        (x_norm + options.parameter_tolerance)),
+                       options.parameter_tolerance);
+      summary->termination_type = CONVERGENCE;
+      VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
+      return;
+    }
+
+    if (iteration_summary.gradient_max_norm <= options.gradient_tolerance) {
+      summary->message = StringPrintf("Gradient tolerance reached. "
+                                      "Gradient max norm: %e <= %e",
+                                      iteration_summary.gradient_max_norm,
+                                      options.gradient_tolerance);
+      summary->termination_type = CONVERGENCE;
+      VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
+      break;
+    }
+
+    const double absolute_function_tolerance =
+        options.function_tolerance * previous_state.cost;
+    if (fabs(iteration_summary.cost_change) <= absolute_function_tolerance) {
+      summary->message =
+          StringPrintf("Function tolerance reached. "
+                       "|cost_change|/cost: %e <= %e",
+                       fabs(iteration_summary.cost_change) /
+                       previous_state.cost,
+                       options.function_tolerance);
+      summary->termination_type = CONVERGENCE;
+      VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
+      break;
+    }
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/line_search_minimizer.h b/internal/ceres/line_search_minimizer.h
new file mode 100644
index 0000000..54b7202
--- /dev/null
+++ b/internal/ceres/line_search_minimizer.h
@@ -0,0 +1,77 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_LINE_SEARCH_MINIMIZER_H_
+#define CERES_INTERNAL_LINE_SEARCH_MINIMIZER_H_
+
+#include "ceres/minimizer.h"
+#include "ceres/solver.h"
+#include "ceres/types.h"
+#include "ceres/internal/eigen.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+// Generic line search minimization algorithm.
+//
+// For example usage, see SolverImpl::Minimize.
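+//
+// In the public API this code path is reached either by setting
+// Solver::Options::minimizer_type = LINE_SEARCH on an ordinary Problem, or
+// by solving a GradientProblem (see line_search_minimizer_test.cc). A
+// minimal illustrative sketch of the former, assuming `problem` is an
+// already populated ceres::Problem:
+//
+//   ceres::Solver::Options options;
+//   options.minimizer_type = ceres::LINE_SEARCH;
+//   options.line_search_direction_type = ceres::LBFGS;
+//   ceres::Solver::Summary summary;
+//   ceres::Solve(options, &problem, &summary);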
+class LineSearchMinimizer : public Minimizer {
+ public:
+  struct State {
+    State(int num_parameters,
+          int num_effective_parameters)
+        : cost(0.0),
+          gradient(num_effective_parameters),
+          gradient_squared_norm(0.0),
+          gradient_max_norm(0.0),
+          search_direction(num_effective_parameters),
+          directional_derivative(0.0),
+          step_size(0.0) {
+    }
+
+    double cost;
+    Vector gradient;
+    double gradient_squared_norm;
+    double gradient_max_norm;
+    Vector search_direction;
+    double directional_derivative;
+    double step_size;
+  };
+
+  ~LineSearchMinimizer() {}
+  virtual void Minimize(const Minimizer::Options& options,
+                        double* parameters,
+                        Solver::Summary* summary);
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_LINE_SEARCH_MINIMIZER_H_
diff --git a/internal/ceres/line_search_minimizer_test.cc b/internal/ceres/line_search_minimizer_test.cc
new file mode 100644
index 0000000..aa83769
--- /dev/null
+++ b/internal/ceres/line_search_minimizer_test.cc
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <cmath>
+#include <cstdlib>
+#include <limits>
+
+#include "ceres/ceres.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+class QuadraticFirstOrderFunction : public ceres::FirstOrderFunction {
+ public:
+  virtual bool Evaluate(const double* parameters,
+                        double* cost,
+                        double* gradient) const {
+
+    cost[0] = parameters[0] * parameters[0];
+    if (gradient != NULL) {
+      gradient[0] = 2.0 * parameters[0];
+    }
+    return true;
+  }
+
+  virtual int NumParameters() const { return 1; }
+};
+
+TEST(LineSearchMinimizerTest, FinalCostIsZero) {
+  double parameters[1] = {2.0};
+  ceres::GradientProblem problem(new QuadraticFirstOrderFunction);
+  ceres::GradientProblemSolver::Options options;
+  ceres::GradientProblemSolver::Summary summary;
+  ceres::Solve(options, problem, parameters, &summary);
+  EXPECT_NEAR(summary.final_cost, 0.0, std::numeric_limits<double>::epsilon());
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/line_search_preprocessor.cc b/internal/ceres/line_search_preprocessor.cc
new file mode 100644
index 0000000..17226ad
--- /dev/null
+++ b/internal/ceres/line_search_preprocessor.cc
@@ -0,0 +1,110 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/line_search_preprocessor.h"
+
+#include <numeric>
+#include <string>
+#include "ceres/casts.h"
+#include "ceres/context_impl.h"
+#include "ceres/evaluator.h"
+#include "ceres/minimizer.h"
+#include "ceres/problem_impl.h"
+#include "ceres/program.h"
+#include "ceres/wall_time.h"
+
+namespace ceres {
+namespace internal {
+namespace {
+
+bool IsProgramValid(const Program& program, std::string* error) {
+  if (program.IsBoundsConstrained()) {
+    *error = "LINE_SEARCH Minimizer does not support bounds.";
+    return false;
+  }
+  return program.ParameterBlocksAreFinite(error);
+}
+
+bool SetupEvaluator(PreprocessedProblem* pp) {
+  pp->evaluator_options = Evaluator::Options();
+  // This ensures that we get a Block Jacobian Evaluator without any
+  // requirement on orderings.
+  pp->evaluator_options.linear_solver_type = CGNR;
+  pp->evaluator_options.num_eliminate_blocks = 0;
+  pp->evaluator_options.num_threads = pp->options.num_threads;
+  pp->evaluator_options.context = pp->problem->context();
+  pp->evaluator_options.evaluation_callback = pp->options.evaluation_callback;
+  pp->evaluator.reset(Evaluator::Create(pp->evaluator_options,
+                                        pp->reduced_program.get(),
+                                        &pp->error));
+  return (pp->evaluator.get() != NULL);
+}
+
+}  // namespace
+
+LineSearchPreprocessor::~LineSearchPreprocessor() {
+}
+
+bool LineSearchPreprocessor::Preprocess(const Solver::Options& options,
+                                        ProblemImpl* problem,
+                                        PreprocessedProblem* pp) {
+  CHECK(pp != nullptr);
+  pp->options = options;
+  ChangeNumThreadsIfNeeded(&pp->options);
+
+  pp->problem = problem;
+  Program* program = problem->mutable_program();
+  if (!IsProgramValid(*program, &pp->error)) {
+    return false;
+  }
+
+  pp->reduced_program.reset(
+      program->CreateReducedProgram(&pp->removed_parameter_blocks,
+                                    &pp->fixed_cost,
+                                    &pp->error));
+
+  if (pp->reduced_program.get() == NULL) {
+    return false;
+  }
+
+  if (pp->reduced_program->NumParameterBlocks() == 0) {
+    return true;
+  }
+
+  if (!SetupEvaluator(pp)) {
+    return false;
+  }
+
+  SetupCommonMinimizerOptions(pp);
+  return true;
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/line_search_preprocessor.h b/internal/ceres/line_search_preprocessor.h
new file mode 100644
index 0000000..132d83a
--- /dev/null
+++ b/internal/ceres/line_search_preprocessor.h
@@ -0,0 +1,50 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_LINE_SEARCH_PREPROCESSOR_H_
+#define CERES_INTERNAL_LINE_SEARCH_PREPROCESSOR_H_
+
+#include "ceres/preprocessor.h"
+
+namespace ceres {
+namespace internal {
+
+class LineSearchPreprocessor : public Preprocessor {
+ public:
+  virtual ~LineSearchPreprocessor();
+  virtual bool Preprocess(const Solver::Options& options,
+                          ProblemImpl* problem,
+                          PreprocessedProblem* preprocessed_problem);
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_LINE_SEARCH_PREPROCESSOR_H_
diff --git a/internal/ceres/line_search_preprocessor_test.cc b/internal/ceres/line_search_preprocessor_test.cc
new file mode 100644
index 0000000..301509c
--- /dev/null
+++ b/internal/ceres/line_search_preprocessor_test.cc
@@ -0,0 +1,137 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <limits>
+#include <map>
+
+#include "ceres/line_search_preprocessor.h"
+#include "ceres/problem_impl.h"
+#include "ceres/sized_cost_function.h"
+#include "ceres/solver.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+TEST(LineSearchPreprocessor, ZeroProblem) {
+  ProblemImpl problem;
+  Solver::Options options;
+  options.minimizer_type = LINE_SEARCH;
+  LineSearchPreprocessor preprocessor;
+  PreprocessedProblem pp;
+  EXPECT_TRUE(preprocessor.Preprocess(options, &problem, &pp));
+}
+
+TEST(LineSearchPreprocessor, ProblemWithInvalidParameterBlock) {
+  ProblemImpl problem;
+  double x = std::numeric_limits<double>::quiet_NaN();
+  problem.AddParameterBlock(&x, 1);
+  Solver::Options options;
+  options.minimizer_type = LINE_SEARCH;
+  LineSearchPreprocessor preprocessor;
+  PreprocessedProblem pp;
+  EXPECT_FALSE(preprocessor.Preprocess(options, &problem, &pp));
+}
+
+TEST(LineSearchPreprocessor, ParameterBlockHasBounds) {
+  ProblemImpl problem;
+  double x = 1.0;
+  problem.AddParameterBlock(&x, 1);
+  problem.SetParameterUpperBound(&x, 0, 1.0);
+  problem.SetParameterLowerBound(&x, 0, 2.0);
+  Solver::Options options;
+  options.minimizer_type = LINE_SEARCH;
+  LineSearchPreprocessor preprocessor;
+  PreprocessedProblem pp;
+  EXPECT_FALSE(preprocessor.Preprocess(options, &problem, &pp));
+}
+
+class FailingCostFunction : public SizedCostFunction<1, 1> {
+ public:
+  bool Evaluate(double const* const* parameters,
+                double* residuals,
+                double** jacobians) const {
+    return false;
+  }
+};
+
+TEST(LineSearchPreprocessor, RemoveParameterBlocksFailed) {
+  ProblemImpl problem;
+  double x = 3.0;
+  problem.AddResidualBlock(new FailingCostFunction, NULL, &x);
+  problem.SetParameterBlockConstant(&x);
+  Solver::Options options;
+  options.minimizer_type = LINE_SEARCH;
+  LineSearchPreprocessor preprocessor;
+  PreprocessedProblem pp;
+  EXPECT_FALSE(preprocessor.Preprocess(options, &problem, &pp));
+}
+
+TEST(LineSearchPreprocessor, RemoveParameterBlocksSucceeds) {
+  ProblemImpl problem;
+  double x = 3.0;
+  problem.AddParameterBlock(&x, 1);
+  Solver::Options options;
+  options.minimizer_type = LINE_SEARCH;
+
+  LineSearchPreprocessor preprocessor;
+  PreprocessedProblem pp;
+  EXPECT_TRUE(preprocessor.Preprocess(options, &problem, &pp));
+}
+
+template <int kNumResiduals, int... Ns>
+class DummyCostFunction : public SizedCostFunction<kNumResiduals, Ns...> {
+ public:
+  bool Evaluate(double const* const* parameters,
+                double* residuals,
+                double** jacobians) const {
+    return true;
+  }
+};
+
+TEST(LineSearchPreprocessor, NormalOperation) {
+  ProblemImpl problem;
+  double x = 1.0;
+  double y = 1.0;
+  double z = 1.0;
+  problem.AddResidualBlock(new DummyCostFunction<1, 1, 1>, NULL, &x, &y);
+  problem.AddResidualBlock(new DummyCostFunction<1, 1, 1>, NULL, &y, &z);
+
+  Solver::Options options;
+  options.minimizer_type = LINE_SEARCH;
+
+  LineSearchPreprocessor preprocessor;
+  PreprocessedProblem pp;
+  EXPECT_TRUE(preprocessor.Preprocess(options, &problem, &pp));
+  EXPECT_EQ(pp.evaluator_options.linear_solver_type, CGNR);
+  EXPECT_TRUE(pp.evaluator.get() != NULL);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/linear_least_squares_problems.cc b/internal/ceres/linear_least_squares_problems.cc
new file mode 100644
index 0000000..7c523d3
--- /dev/null
+++ b/internal/ceres/linear_least_squares_problems.cc
@@ -0,0 +1,733 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/linear_least_squares_problems.h"
+
+#include <cstdio>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/block_structure.h"
+#include "ceres/casts.h"
+#include "ceres/file.h"
+#include "ceres/stringprintf.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+using std::string;
+
+LinearLeastSquaresProblem* CreateLinearLeastSquaresProblemFromId(int id) {
+  switch (id) {
+    case 0:
+      return LinearLeastSquaresProblem0();
+    case 1:
+      return LinearLeastSquaresProblem1();
+    case 2:
+      return LinearLeastSquaresProblem2();
+    case 3:
+      return LinearLeastSquaresProblem3();
+    case 4:
+      return LinearLeastSquaresProblem4();
+    default:
+      LOG(FATAL) << "Unknown problem id requested " << id;
+  }
+  return NULL;
+}
+
+/*
+A = [1   2]
+    [3   4]
+    [6 -10]
+
+b = [  8
+      18
+     -18]
+
+x = [2
+     3]
+
+D = [1
+     2]
+
+x_D = [1.78448275;
+       2.82327586;]
+ */
+LinearLeastSquaresProblem* LinearLeastSquaresProblem0() {
+  LinearLeastSquaresProblem* problem = new LinearLeastSquaresProblem;
+
+  TripletSparseMatrix* A = new TripletSparseMatrix(3, 2, 6);
+  problem->b.reset(new double[3]);
+  problem->D.reset(new double[2]);
+
+  problem->x.reset(new double[2]);
+  problem->x_D.reset(new double[2]);
+
+  int* Ai = A->mutable_rows();
+  int* Aj = A->mutable_cols();
+  double* Ax = A->mutable_values();
+
+  int counter = 0;
+  for (int i = 0; i < 3; ++i) {
+    for (int j = 0; j < 2; ++j) {
+      Ai[counter] = i;
+      Aj[counter] = j;
+      ++counter;
+    }
+  }
+
+  Ax[0] = 1.;
+  Ax[1] = 2.;
+  Ax[2] = 3.;
+  Ax[3] = 4.;
+  Ax[4] = 6;
+  Ax[5] = -10;
+  A->set_num_nonzeros(6);
+  problem->A.reset(A);
+
+  problem->b[0] = 8;
+  problem->b[1] = 18;
+  problem->b[2] = -18;
+
+  problem->x[0] = 2.0;
+  problem->x[1] = 3.0;
+
+  problem->D[0] = 1;
+  problem->D[1] = 2;
+
+  problem->x_D[0] = 1.78448275;
+  problem->x_D[1] = 2.82327586;
+  return problem;
+}
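+
+// An illustrative sanity check, not part of the original file: the ground
+// truth quoted in the comment above follows directly from the normal
+// equations. This sketch uses the dense Eigen typedefs (Matrix, Vector)
+// already visible in this translation unit; the function name is
+// hypothetical and exists purely for exposition.
+static void CheckLinearLeastSquaresProblem0GroundTruth() {
+  Matrix A(3, 2);
+  A << 1, 2, 3, 4, 6, -10;
+  Vector b(3);
+  b << 8, 18, -18;
+  Vector D(2);
+  D << 1, 2;
+
+  // Unregularized solution: x = (A'A)^-1 A'b.
+  const Vector x = (A.transpose() * A).ldlt().solve(A.transpose() * b);
+  CHECK_NEAR(x[0], 2.0, 1e-8);
+  CHECK_NEAR(x[1], 3.0, 1e-8);
+
+  // Regularized solution: x_D = (A'A + D'D)^-1 A'b.
+  Matrix lhs = A.transpose() * A;
+  lhs.diagonal() += D.cwiseProduct(D);
+  const Vector x_D = lhs.ldlt().solve(A.transpose() * b);
+  CHECK_NEAR(x_D[0], 1.78448275, 1e-6);
+  CHECK_NEAR(x_D[1], 2.82327586, 1e-6);
+}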
+
+
+/*
+      A = [1 0  | 2 0 0
+           3 0  | 0 4 0
+           0 5  | 0 0 6
+           0 7  | 8 0 0
+           0 9  | 1 0 0
+           0 0  | 1 1 1]
+
+      b = [0
+           1
+           2
+           3
+           4
+           5]
+
+      c = A'* b = [ 3
+                   67
+                   33
+                    9
+                   17]
+
+      A'A = [10    0    2   12   0
+              0  155   65    0  30
+              2   65   70    1   1
+             12    0    1   17   1
+              0   30    1    1  37]
+
+      S = [ 42.3419  -1.4000  -11.5806
+            -1.4000   2.6000    1.0000
+           -11.5806   1.0000   31.1935]
+
+      r = [ 4.3032
+            5.4000
+            4.0323]
+
+      S\r = [ 0.2102
+              2.1367
+              0.1388]
+
+      A\b = [-2.3061
+              0.3172
+              0.2102
+              2.1367
+              0.1388]
+*/
+// The following two functions create a TripletSparseMatrix and a
+// BlockSparseMatrix version of this problem.
+
+// TripletSparseMatrix version.
+LinearLeastSquaresProblem* LinearLeastSquaresProblem1() {
+  int num_rows = 6;
+  int num_cols = 5;
+
+  LinearLeastSquaresProblem* problem = new LinearLeastSquaresProblem;
+  TripletSparseMatrix* A = new TripletSparseMatrix(num_rows,
+                                                   num_cols,
+                                                   num_rows * num_cols);
+  problem->b.reset(new double[num_rows]);
+  problem->D.reset(new double[num_cols]);
+  problem->num_eliminate_blocks = 2;
+
+  int* rows = A->mutable_rows();
+  int* cols = A->mutable_cols();
+  double* values = A->mutable_values();
+
+  int nnz = 0;
+
+  // Row 1
+  {
+    rows[nnz] = 0;
+    cols[nnz] = 0;
+    values[nnz++] = 1;
+
+    rows[nnz] = 0;
+    cols[nnz] = 2;
+    values[nnz++] = 2;
+  }
+
+  // Row 2
+  {
+    rows[nnz] = 1;
+    cols[nnz] = 0;
+    values[nnz++] = 3;
+
+    rows[nnz] = 1;
+    cols[nnz] = 3;
+    values[nnz++] = 4;
+  }
+
+  // Row 3
+  {
+    rows[nnz] = 2;
+    cols[nnz] = 1;
+    values[nnz++] = 5;
+
+    rows[nnz] = 2;
+    cols[nnz] = 4;
+    values[nnz++] = 6;
+  }
+
+  // Row 4
+  {
+    rows[nnz] = 3;
+    cols[nnz] = 1;
+    values[nnz++] = 7;
+
+    rows[nnz] = 3;
+    cols[nnz] = 2;
+    values[nnz++] = 8;
+  }
+
+  // Row 5
+  {
+    rows[nnz] = 4;
+    cols[nnz] = 1;
+    values[nnz++] = 9;
+
+    rows[nnz] = 4;
+    cols[nnz] = 2;
+    values[nnz++] = 1;
+  }
+
+  // Row 6
+  {
+    rows[nnz] = 5;
+    cols[nnz] = 2;
+    values[nnz++] = 1;
+
+    rows[nnz] = 5;
+    cols[nnz] = 3;
+    values[nnz++] = 1;
+
+    rows[nnz] = 5;
+    cols[nnz] = 4;
+    values[nnz++] = 1;
+  }
+
+  A->set_num_nonzeros(nnz);
+  CHECK(A->IsValid());
+
+  problem->A.reset(A);
+
+  for (int i = 0; i < num_cols; ++i) {
+    problem->D.get()[i] = 1;
+  }
+
+  for (int i = 0; i < num_rows; ++i) {
+    problem->b.get()[i] = i;
+  }
+
+  return problem;
+}
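+
+// An illustrative sketch, not part of the original file: the Schur
+// complement S, the reduced right hand side r and the least squares
+// solution quoted in the comment above can be reproduced with dense
+// arithmetic by splitting A into its e-block columns E and f-block columns
+// F. The function name is hypothetical and exists purely for exposition.
+static void CheckLinearLeastSquaresProblem1SchurComplement() {
+  Matrix A = Matrix::Zero(6, 5);
+  A(0, 0) = 1; A(0, 2) = 2;
+  A(1, 0) = 3; A(1, 3) = 4;
+  A(2, 1) = 5; A(2, 4) = 6;
+  A(3, 1) = 7; A(3, 2) = 8;
+  A(4, 1) = 9; A(4, 2) = 1;
+  A(5, 2) = 1; A(5, 3) = 1; A(5, 4) = 1;
+  Vector b(6);
+  b << 0, 1, 2, 3, 4, 5;
+
+  const Matrix E = A.leftCols(2);   // The two e-blocks.
+  const Matrix F = A.rightCols(3);  // The three f-blocks.
+  const Matrix EtE_inverse = (E.transpose() * E).inverse();
+
+  // S = F'F - F'E (E'E)^-1 E'F and r = F'b - F'E (E'E)^-1 E'b.
+  const Matrix S =
+      F.transpose() * F - F.transpose() * E * EtE_inverse * E.transpose() * F;
+  const Vector r =
+      F.transpose() * b - F.transpose() * E * EtE_inverse * E.transpose() * b;
+
+  // Solving the reduced system gives the f-block part of A \ b.
+  const Vector z = S.ldlt().solve(r);
+  const Vector x = (A.transpose() * A).ldlt().solve(A.transpose() * b);
+  CHECK_NEAR(z[0], x[2], 1e-8);  // ~0.2102
+  CHECK_NEAR(z[1], x[3], 1e-8);  // ~2.1367
+  CHECK_NEAR(z[2], x[4], 1e-8);  // ~0.1388
+}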
+
+// BlockSparseMatrix version
+LinearLeastSquaresProblem* LinearLeastSquaresProblem2() {
+  int num_rows = 6;
+  int num_cols = 5;
+
+  LinearLeastSquaresProblem* problem = new LinearLeastSquaresProblem;
+
+  problem->b.reset(new double[num_rows]);
+  problem->D.reset(new double[num_cols]);
+  problem->num_eliminate_blocks = 2;
+
+  CompressedRowBlockStructure* bs = new CompressedRowBlockStructure;
+  std::unique_ptr<double[]> values(new double[num_rows * num_cols]);
+
+  for (int c = 0; c < num_cols; ++c) {
+    bs->cols.push_back(Block());
+    bs->cols.back().size = 1;
+    bs->cols.back().position = c;
+  }
+
+  int nnz = 0;
+
+  // Row 1
+  {
+    values[nnz++] = 1;
+    values[nnz++] = 2;
+
+    bs->rows.push_back(CompressedRow());
+    CompressedRow& row = bs->rows.back();
+    row.block.size = 1;
+    row.block.position = 0;
+    row.cells.push_back(Cell(0, 0));
+    row.cells.push_back(Cell(2, 1));
+  }
+
+  // Row 2
+  {
+    values[nnz++] = 3;
+    values[nnz++] = 4;
+
+    bs->rows.push_back(CompressedRow());
+    CompressedRow& row = bs->rows.back();
+    row.block.size = 1;
+    row.block.position = 1;
+    row.cells.push_back(Cell(0, 2));
+    row.cells.push_back(Cell(3, 3));
+  }
+
+  // Row 3
+  {
+    values[nnz++] = 5;
+    values[nnz++] = 6;
+
+    bs->rows.push_back(CompressedRow());
+    CompressedRow& row = bs->rows.back();
+    row.block.size = 1;
+    row.block.position = 2;
+    row.cells.push_back(Cell(1, 4));
+    row.cells.push_back(Cell(4, 5));
+  }
+
+  // Row 4
+  {
+    values[nnz++] = 7;
+    values[nnz++] = 8;
+
+    bs->rows.push_back(CompressedRow());
+    CompressedRow& row = bs->rows.back();
+    row.block.size = 1;
+    row.block.position = 3;
+    row.cells.push_back(Cell(1, 6));
+    row.cells.push_back(Cell(2, 7));
+  }
+
+  // Row 5
+  {
+    values[nnz++] = 9;
+    values[nnz++] = 1;
+
+    bs->rows.push_back(CompressedRow());
+    CompressedRow& row = bs->rows.back();
+    row.block.size = 1;
+    row.block.position = 4;
+    row.cells.push_back(Cell(1, 8));
+    row.cells.push_back(Cell(2, 9));
+  }
+
+  // Row 6
+  {
+    values[nnz++] = 1;
+    values[nnz++] = 1;
+    values[nnz++] = 1;
+
+    bs->rows.push_back(CompressedRow());
+    CompressedRow& row = bs->rows.back();
+    row.block.size = 1;
+    row.block.position = 5;
+    row.cells.push_back(Cell(2, 10));
+    row.cells.push_back(Cell(3, 11));
+    row.cells.push_back(Cell(4, 12));
+  }
+
+  BlockSparseMatrix* A = new BlockSparseMatrix(bs);
+  memcpy(A->mutable_values(), values.get(), nnz * sizeof(*A->values()));
+
+  for (int i = 0; i < num_cols; ++i) {
+    problem->D.get()[i] = 1;
+  }
+
+  for (int i = 0; i < num_rows; ++i) {
+    problem->b.get()[i] = i;
+  }
+
+  problem->A.reset(A);
+
+  return problem;
+}
+
+
+/*
+      A = [1 0
+           3 0
+           0 5
+           0 7
+           0 9
+           0 0]
+
+      b = [0
+           1
+           2
+           3
+           4
+           5]
+*/
+// BlockSparseMatrix version
+LinearLeastSquaresProblem* LinearLeastSquaresProblem3() {
+  int num_rows = 5;
+  int num_cols = 2;
+
+  LinearLeastSquaresProblem* problem = new LinearLeastSquaresProblem;
+
+  problem->b.reset(new double[num_rows]);
+  problem->D.reset(new double[num_cols]);
+  problem->num_eliminate_blocks = 2;
+
+  CompressedRowBlockStructure* bs = new CompressedRowBlockStructure;
+  std::unique_ptr<double[]> values(new double[num_rows * num_cols]);
+
+  for (int c = 0; c < num_cols; ++c) {
+    bs->cols.push_back(Block());
+    bs->cols.back().size = 1;
+    bs->cols.back().position = c;
+  }
+
+  int nnz = 0;
+
+  // Row 1
+  {
+    values[nnz++] = 1;
+    bs->rows.push_back(CompressedRow());
+    CompressedRow& row = bs->rows.back();
+    row.block.size = 1;
+    row.block.position = 0;
+    row.cells.push_back(Cell(0, 0));
+  }
+
+  // Row 2
+  {
+    values[nnz++] = 3;
+    bs->rows.push_back(CompressedRow());
+    CompressedRow& row = bs->rows.back();
+    row.block.size = 1;
+    row.block.position = 1;
+    row.cells.push_back(Cell(0, 1));
+  }
+
+  // Row 3
+  {
+    values[nnz++] = 5;
+    bs->rows.push_back(CompressedRow());
+    CompressedRow& row = bs->rows.back();
+    row.block.size = 1;
+    row.block.position = 2;
+    row.cells.push_back(Cell(1, 2));
+  }
+
+  // Row 4
+  {
+    values[nnz++] = 7;
+    bs->rows.push_back(CompressedRow());
+    CompressedRow& row = bs->rows.back();
+    row.block.size = 1;
+    row.block.position = 3;
+    row.cells.push_back(Cell(1, 3));
+  }
+
+  // Row 5
+  {
+    values[nnz++] = 9;
+    bs->rows.push_back(CompressedRow());
+    CompressedRow& row = bs->rows.back();
+    row.block.size = 1;
+    row.block.position = 4;
+    row.cells.push_back(Cell(1, 4));
+  }
+
+  BlockSparseMatrix* A = new BlockSparseMatrix(bs);
+  memcpy(A->mutable_values(), values.get(), nnz * sizeof(*A->values()));
+
+  for (int i = 0; i < num_cols; ++i) {
+    problem->D.get()[i] = 1;
+  }
+
+  for (int i = 0; i < num_rows; ++i) {
+    problem->b.get()[i] = i;
+  }
+
+  problem->A.reset(A);
+
+  return problem;
+}
+
+/*
+      A = [1 2 0 0 0 1 1
+           1 4 0 0 0 5 6
+           0 0 9 0 0 3 1]
+
+      b = [0
+           1
+           2]
+*/
+// BlockSparseMatrix version
+//
+// This problem has the unique property that it has two different
+// sized f-blocks, but only one of them occurs in the rows involving
+// the one e-block. So performing Schur elimination on this problem
+// tests the Schur Eliminator's ability to handle non-e-block rows
+// correctly when their structure does not conform to the static
+// structure determined by DetectStructure.
+//
+// NOTE: This problem is too small and rank deficient to be solved without
+// the diagonal regularization.
+LinearLeastSquaresProblem* LinearLeastSquaresProblem4() {
+  int num_rows = 3;
+  int num_cols = 7;
+
+  LinearLeastSquaresProblem* problem = new LinearLeastSquaresProblem;
+
+  problem->b.reset(new double[num_rows]);
+  problem->D.reset(new double[num_cols]);
+  problem->num_eliminate_blocks = 1;
+
+  CompressedRowBlockStructure* bs = new CompressedRowBlockStructure;
+  std::unique_ptr<double[]> values(new double[num_rows * num_cols]);
+
+  // Column block structure
+  bs->cols.push_back(Block());
+  bs->cols.back().size = 2;
+  bs->cols.back().position = 0;
+
+  bs->cols.push_back(Block());
+  bs->cols.back().size = 3;
+  bs->cols.back().position = 2;
+
+  bs->cols.push_back(Block());
+  bs->cols.back().size = 2;
+  bs->cols.back().position = 5;
+
+  int nnz = 0;
+
+  // Row 1 & 2
+  {
+    bs->rows.push_back(CompressedRow());
+    CompressedRow& row = bs->rows.back();
+    row.block.size = 2;
+    row.block.position = 0;
+
+    row.cells.push_back(Cell(0, nnz));
+    values[nnz++] = 1;
+    values[nnz++] = 2;
+    values[nnz++] = 1;
+    values[nnz++] = 4;
+
+    row.cells.push_back(Cell(2, nnz));
+    values[nnz++] = 1;
+    values[nnz++] = 1;
+    values[nnz++] = 5;
+    values[nnz++] = 6;
+  }
+
+  // Row 3
+  {
+    bs->rows.push_back(CompressedRow());
+    CompressedRow& row = bs->rows.back();
+    row.block.size = 1;
+    row.block.position = 2;
+
+    row.cells.push_back(Cell(1, nnz));
+    values[nnz++] = 9;
+    values[nnz++] = 0;
+    values[nnz++] = 0;
+
+    row.cells.push_back(Cell(2, nnz));
+    values[nnz++] = 3;
+    values[nnz++] = 1;
+  }
+
+  BlockSparseMatrix* A = new BlockSparseMatrix(bs);
+  memcpy(A->mutable_values(), values.get(), nnz * sizeof(*A->values()));
+
+  for (int i = 0; i < num_cols; ++i) {
+    problem->D.get()[i] = (i + 1) * 100;
+  }
+
+  for (int i = 0; i < num_rows; ++i) {
+    problem->b.get()[i] = i;
+  }
+
+  problem->A.reset(A);
+  return problem;
+}
+
+namespace {
+bool DumpLinearLeastSquaresProblemToConsole(const SparseMatrix* A,
+                                            const double* D,
+                                            const double* b,
+                                            const double* x,
+                                            int num_eliminate_blocks) {
+  CHECK(A != nullptr);
+  Matrix AA;
+  A->ToDenseMatrix(&AA);
+  LOG(INFO) << "A^T: \n" << AA.transpose();
+
+  if (D != NULL) {
+    LOG(INFO) << "A's appended diagonal:\n"
+              << ConstVectorRef(D, A->num_cols());
+  }
+
+  if (b != NULL) {
+    LOG(INFO) << "b: \n" << ConstVectorRef(b, A->num_rows());
+  }
+
+  if (x != NULL) {
+    LOG(INFO) << "x: \n" << ConstVectorRef(x, A->num_cols());
+  }
+  return true;
+}
+
+void WriteArrayToFileOrDie(const string& filename,
+                           const double* x,
+                           const int size) {
+  CHECK(x != nullptr);
+  VLOG(2) << "Writing array to: " << filename;
+  FILE* fptr = fopen(filename.c_str(), "w");
+  CHECK(fptr != nullptr);
+  for (int i = 0; i < size; ++i) {
+    fprintf(fptr, "%17f\n", x[i]);
+  }
+  fclose(fptr);
+}
+
+bool DumpLinearLeastSquaresProblemToTextFile(const string& filename_base,
+                                             const SparseMatrix* A,
+                                             const double* D,
+                                             const double* b,
+                                             const double* x,
+                                             int num_eliminate_blocks) {
+  CHECK(A != nullptr);
+  LOG(INFO) << "writing to: " << filename_base << "*";
+
+  string matlab_script;
+  StringAppendF(&matlab_script,
+                "function lsqp = load_trust_region_problem()\n");
+  StringAppendF(&matlab_script,
+                "lsqp.num_rows = %d;\n", A->num_rows());
+  StringAppendF(&matlab_script,
+                "lsqp.num_cols = %d;\n", A->num_cols());
+
+  {
+    string filename = filename_base + "_A.txt";
+    FILE* fptr = fopen(filename.c_str(), "w");
+    CHECK(fptr != nullptr);
+    A->ToTextFile(fptr);
+    fclose(fptr);
+    StringAppendF(&matlab_script,
+                  "tmp = load('%s', '-ascii');\n", filename.c_str());
+    StringAppendF(
+        &matlab_script,
+        "lsqp.A = sparse(tmp(:, 1) + 1, tmp(:, 2) + 1, tmp(:, 3), %d, %d);\n",
+        A->num_rows(),
+        A->num_cols());
+  }
+
+
+  if (D != NULL) {
+    string filename = filename_base + "_D.txt";
+    WriteArrayToFileOrDie(filename, D, A->num_cols());
+    StringAppendF(&matlab_script,
+                  "lsqp.D = load('%s', '-ascii');\n", filename.c_str());
+  }
+
+  if (b != NULL) {
+    string filename = filename_base + "_b.txt";
+    WriteArrayToFileOrDie(filename, b, A->num_rows());
+    StringAppendF(&matlab_script,
+                  "lsqp.b = load('%s', '-ascii');\n", filename.c_str());
+  }
+
+  if (x != NULL) {
+    string filename = filename_base + "_x.txt";
+    WriteArrayToFileOrDie(filename, x, A->num_cols());
+    StringAppendF(&matlab_script,
+                  "lsqp.x = load('%s', '-ascii');\n", filename.c_str());
+  }
+
+  string matlab_filename = filename_base + ".m";
+  WriteStringToFileOrDie(matlab_script, matlab_filename);
+  return true;
+}
+}  // namespace
+
+bool DumpLinearLeastSquaresProblem(const string& filename_base,
+                                   DumpFormatType dump_format_type,
+                                   const SparseMatrix* A,
+                                   const double* D,
+                                   const double* b,
+                                   const double* x,
+                                   int num_eliminate_blocks) {
+  switch (dump_format_type) {
+    case CONSOLE:
+      return DumpLinearLeastSquaresProblemToConsole(A, D, b, x,
+                                                    num_eliminate_blocks);
+    case TEXTFILE:
+      return DumpLinearLeastSquaresProblemToTextFile(filename_base,
+                                                     A, D, b, x,
+                                                     num_eliminate_blocks);
+    default:
+      LOG(FATAL) << "Unknown DumpFormatType " << dump_format_type;
+  }
+
+  return true;
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/linear_least_squares_problems.h b/internal/ceres/linear_least_squares_problems.h
new file mode 100644
index 0000000..5dfcd34
--- /dev/null
+++ b/internal/ceres/linear_least_squares_problems.h
@@ -0,0 +1,84 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_LINEAR_LEAST_SQUARES_PROBLEMS_H_
+#define CERES_INTERNAL_LINEAR_LEAST_SQUARES_PROBLEMS_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+#include "ceres/sparse_matrix.h"
+#include "ceres/internal/port.h"
+
+namespace ceres {
+namespace internal {
+
+// Structure defining a linear least squares problem and, if possible,
+// ground truth solutions. To be used by various LinearSolver tests.
+struct LinearLeastSquaresProblem {
+  LinearLeastSquaresProblem()
+      : num_eliminate_blocks(0) {
+  }
+
+  std::unique_ptr<SparseMatrix> A;
+  std::unique_ptr<double[]> b;
+  std::unique_ptr<double[]> D;
+  // If using the Schur eliminator, how many of the variable
+  // blocks are e_type blocks.
+  int num_eliminate_blocks;
+
+  // Solution to min_x |Ax - b|^2
+  std::unique_ptr<double[]> x;
+  // Solution to min_x |Ax - b|^2 + |Dx|^2
+  std::unique_ptr<double[]> x_D;
+};
+
+// Factories for linear least squares problems.
+LinearLeastSquaresProblem* CreateLinearLeastSquaresProblemFromId(int id);
+
+LinearLeastSquaresProblem* LinearLeastSquaresProblem0();
+LinearLeastSquaresProblem* LinearLeastSquaresProblem1();
+LinearLeastSquaresProblem* LinearLeastSquaresProblem2();
+LinearLeastSquaresProblem* LinearLeastSquaresProblem3();
+LinearLeastSquaresProblem* LinearLeastSquaresProblem4();
+
+// Write the linear least squares problem to disk. The exact format
+// depends on dump_format_type.
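+//
+// For example (an illustrative sketch; the file name prefix is
+// hypothetical, and x may be NULL for problems without a stored solution):
+//
+//   std::unique_ptr<LinearLeastSquaresProblem> problem(
+//       CreateLinearLeastSquaresProblemFromId(2));
+//   DumpLinearLeastSquaresProblem("/tmp/lsqp", TEXTFILE,
+//                                 problem->A.get(), problem->D.get(),
+//                                 problem->b.get(), problem->x.get(),
+//                                 problem->num_eliminate_blocks);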
+bool DumpLinearLeastSquaresProblem(const std::string& filename_base,
+                                   DumpFormatType dump_format_type,
+                                   const SparseMatrix* A,
+                                   const double* D,
+                                   const double* b,
+                                   const double* x,
+                                   int num_eliminate_blocks);
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_LINEAR_LEAST_SQUARES_PROBLEMS_H_
diff --git a/internal/ceres/linear_operator.cc b/internal/ceres/linear_operator.cc
new file mode 100644
index 0000000..9d291bd
--- /dev/null
+++ b/internal/ceres/linear_operator.cc
@@ -0,0 +1,40 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/linear_operator.h"
+
+namespace ceres {
+namespace internal {
+
+LinearOperator::~LinearOperator() {
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/linear_operator.h b/internal/ceres/linear_operator.h
new file mode 100644
index 0000000..6463fb5
--- /dev/null
+++ b/internal/ceres/linear_operator.h
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Base class for access to a linear operator.
+
+#ifndef CERES_INTERNAL_LINEAR_OPERATOR_H_
+#define CERES_INTERNAL_LINEAR_OPERATOR_H_
+
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+// This is an abstract base class for linear operators. It supports
+// access to size information and left and right multiply operators.
+class LinearOperator {
+ public:
+  virtual ~LinearOperator();
+
+  // y = y + Ax;
+  virtual void RightMultiply(const double* x, double* y) const = 0;
+  // y = y + A'x;
+  virtual void LeftMultiply(const double* x, double* y) const = 0;
+
+  virtual int num_rows() const = 0;
+  virtual int num_cols() const = 0;
+};
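+
+// An illustrative sketch of a concrete LinearOperator, not part of Ceres: a
+// diagonal operator D stored as an array of its diagonal entries, for which
+// both multiplies reduce to an elementwise multiply-accumulate. The class
+// name is hypothetical; something along these lines could serve, for
+// example, as a simple preconditioner for the iterative solvers.
+class ExampleDiagonalOperator : public LinearOperator {
+ public:
+  ExampleDiagonalOperator(const double* diagonal, int size)
+      : diagonal_(diagonal), size_(size) {}
+  virtual ~ExampleDiagonalOperator() {}
+
+  // y = y + D x.
+  virtual void RightMultiply(const double* x, double* y) const {
+    for (int i = 0; i < size_; ++i) {
+      y[i] += diagonal_[i] * x[i];
+    }
+  }
+
+  // D is symmetric, so y = y + D' x is the same operation.
+  virtual void LeftMultiply(const double* x, double* y) const {
+    RightMultiply(x, y);
+  }
+
+  virtual int num_rows() const { return size_; }
+  virtual int num_cols() const { return size_; }
+
+ private:
+  const double* diagonal_;
+  int size_;
+};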
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_LINEAR_OPERATOR_H_
diff --git a/internal/ceres/linear_solver.cc b/internal/ceres/linear_solver.cc
new file mode 100644
index 0000000..107af6a
--- /dev/null
+++ b/internal/ceres/linear_solver.cc
@@ -0,0 +1,122 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/linear_solver.h"
+
+#include "ceres/cgnr_solver.h"
+#include "ceres/dense_normal_cholesky_solver.h"
+#include "ceres/dense_qr_solver.h"
+#include "ceres/iterative_schur_complement_solver.h"
+#include "ceres/schur_complement_solver.h"
+#include "ceres/dynamic_sparse_normal_cholesky_solver.h"
+#include "ceres/sparse_normal_cholesky_solver.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+LinearSolver::~LinearSolver() {
+}
+
+LinearSolverType LinearSolver::LinearSolverForZeroEBlocks(
+    LinearSolverType linear_solver_type) {
+  if (!IsSchurType(linear_solver_type)) {
+    return linear_solver_type;
+  }
+
+  if (linear_solver_type == SPARSE_SCHUR) {
+    return SPARSE_NORMAL_CHOLESKY;
+  }
+
+  if (linear_solver_type == DENSE_SCHUR) {
+    // TODO(sameeragarwal): This is probably not a great choice.
+    // Ideally, we should have a DENSE_NORMAL_CHOLESKY, that can take
+    // a BlockSparseMatrix as input.
+    return DENSE_QR;
+  }
+
+  if (linear_solver_type == ITERATIVE_SCHUR) {
+    return CGNR;
+  }
+
+  return linear_solver_type;
+}
+
+LinearSolver* LinearSolver::Create(const LinearSolver::Options& options) {
+  CHECK(options.context != NULL);
+
+  switch (options.type) {
+    case CGNR:
+      return new CgnrSolver(options);
+
+    case SPARSE_NORMAL_CHOLESKY:
+#if defined(CERES_NO_SPARSE)
+      return NULL;
+#else
+      if (options.dynamic_sparsity) {
+        return new DynamicSparseNormalCholeskySolver(options);
+      }
+
+      return new SparseNormalCholeskySolver(options);
+#endif
+
+    case SPARSE_SCHUR:
+#if defined(CERES_NO_SPARSE)
+      return NULL;
+#else
+      return new SparseSchurComplementSolver(options);
+#endif
+
+    case DENSE_SCHUR:
+      return new DenseSchurComplementSolver(options);
+
+    case ITERATIVE_SCHUR:
+      if (options.use_explicit_schur_complement) {
+        return new SparseSchurComplementSolver(options);
+      } else {
+        return new IterativeSchurComplementSolver(options);
+      }
+
+    case DENSE_QR:
+      return new DenseQRSolver(options);
+
+    case DENSE_NORMAL_CHOLESKY:
+      return new DenseNormalCholeskySolver(options);
+
+    default:
+      LOG(FATAL) << "Unknown linear solver type: "
+                 << options.type;
+      return NULL;  // MSVC doesn't understand that LOG(FATAL) never returns.
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/linear_solver.h b/internal/ceres/linear_solver.h
new file mode 100644
index 0000000..24c245d
--- /dev/null
+++ b/internal/ceres/linear_solver.h
@@ -0,0 +1,339 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Abstract interface for objects solving linear systems of various
+// kinds.
+
+#ifndef CERES_INTERNAL_LINEAR_SOLVER_H_
+#define CERES_INTERNAL_LINEAR_SOLVER_H_
+
+#include <cstddef>
+#include <map>
+#include <string>
+#include <vector>
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/casts.h"
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/context_impl.h"
+#include "ceres/dense_sparse_matrix.h"
+#include "ceres/execution_summary.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+enum LinearSolverTerminationType {
+  // Termination criterion was met.
+  LINEAR_SOLVER_SUCCESS,
+
+  // Solver ran for max_num_iterations and terminated before the
+  // termination tolerance could be satisfied.
+  LINEAR_SOLVER_NO_CONVERGENCE,
+
+  // Solver was terminated due to numerical problems, generally due to
+  // the linear system being poorly conditioned.
+  LINEAR_SOLVER_FAILURE,
+
+  // Solver failed with a fatal error that cannot be recovered from,
+  // e.g. CHOLMOD ran out of memory when computing the symbolic or
+  // numeric factorization or an underlying library was called with
+  // the wrong arguments.
+  LINEAR_SOLVER_FATAL_ERROR
+};
+
+// This enum controls the fill-reducing ordering a sparse linear
+// algebra library should use before computing a sparse factorization
+// (usually Cholesky).
+enum OrderingType {
+  NATURAL, // Do not re-order the matrix. This is useful when the
+           // matrix has been ordered using a fill-reducing ordering
+           // already.
+  AMD      // Use the Approximate Minimum Degree algorithm to re-order
+           // the matrix.
+};
+
+class LinearOperator;
+
+// Abstract base class for objects that implement algorithms for
+// solving linear systems
+//
+//   Ax = b
+//
+// It is expected that a single instance of a LinearSolver object
+// may be used multiple times for solving multiple linear systems with
+// the same sparsity structure. This allows it to cache and reuse
+// information across solves. It also means that calling Solve on the
+// same LinearSolver instance with two linear systems that have
+// different sparsity structures results in undefined behaviour.
+//
+// Subclasses of LinearSolver use two structs to configure themselves.
+// The Options struct configures the LinearSolver object for its
+// lifetime. The PerSolveOptions struct is used to specify options for
+// a particular Solve call.
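+//
+// A typical lifecycle, as an illustrative sketch (Summary and the Solve
+// method are declared further below; A, b, x, D and context are assumed to
+// already exist, with A being a SparseMatrix compatible with the chosen
+// solver type):
+//
+//   LinearSolver::Options options;
+//   options.type = DENSE_QR;
+//   options.context = &context;
+//   std::unique_ptr<LinearSolver> solver(LinearSolver::Create(options));
+//
+//   LinearSolver::PerSolveOptions per_solve_options;
+//   per_solve_options.D = D;  // Optional; may be left NULL.
+//   LinearSolver::Summary summary =
+//       solver->Solve(A, b, per_solve_options, x);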
+class LinearSolver {
+ public:
+  struct Options {
+    LinearSolverType type = SPARSE_NORMAL_CHOLESKY;
+    PreconditionerType preconditioner_type = JACOBI;
+    VisibilityClusteringType visibility_clustering_type = CANONICAL_VIEWS;
+    DenseLinearAlgebraLibraryType dense_linear_algebra_library_type = EIGEN;
+    SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type =
+        SUITE_SPARSE;
+
+    // See solver.h for information about these flags.
+    bool use_postordering = false;
+    bool dynamic_sparsity = false;
+    bool use_explicit_schur_complement = false;
+
+    // Number of internal iterations that the solver uses. This
+    // parameter only makes sense for iterative solvers like CG.
+    int min_num_iterations = 1;
+    int max_num_iterations = 1;
+
+    // If possible, how many threads the solver can use.
+    int num_threads = 1;
+
+    // Hints about the order in which the parameter blocks should be
+    // eliminated by the linear solver.
+    //
+    // For example if elimination_groups is a vector of size k, then
+    // the linear solver is informed that it should eliminate the
+    // parameter blocks 0 ... elimination_groups[0] - 1 first, and
+    // then elimination_groups[0] ... elimination_groups[1] - 1 and so
+    // on. Within each elimination group, the linear solver is free to
+    // choose how the parameter blocks are ordered. Different linear
+    // solvers have differing requirements on elimination_groups.
+    //
+    // The most common use is for Schur type solvers, where there
+    // should be at least two elimination groups and the first
+    // elimination group must form an independent set in the normal
+    // equations. The first elimination group corresponds to the
+    // num_eliminate_blocks in the Schur type solvers.
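+    //
+    // For example (illustration only, using the description above),
+    // elimination_groups = {10, 15} means that parameter blocks
+    // 0 ... 9 form the first elimination group and the second group
+    // starts at block 10.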
+    std::vector<int> elimination_groups;
+
+    // Iterative solvers, e.g. Preconditioned Conjugate Gradients,
+    // maintain a cheap estimate of the residual which may become
+    // inaccurate over time. For non-zero values of this parameter,
+    // the solver periodically recomputes the residual exactly via a
+    // |b - Ax| evaluation.
+    int residual_reset_period = 10;
+
+    // If the block sizes in a BlockSparseMatrix are fixed, then in
+    // some cases the Schur complement based solvers can detect and
+    // specialize on them.
+    //
+    // It is expected that these parameters are set programmatically
+    // rather than manually.
+    //
+    // Please see schur_complement_solver.h and schur_eliminator.h for
+    // more details.
+    int row_block_size = Eigen::Dynamic;
+    int e_block_size = Eigen::Dynamic;
+    int f_block_size = Eigen::Dynamic;
+
+    bool use_mixed_precision_solves = false;
+    int max_num_refinement_iterations = 0;
+    ContextImpl* context = nullptr;
+  };
+
+  // Options for the Solve method.
+  struct PerSolveOptions {
+    // This option only makes sense for unsymmetric linear solvers
+    // that can solve rectangular linear systems.
+    //
+    // Given a matrix A, an optional diagonal matrix D as a vector,
+    // and a vector b, the linear solver will solve for
+    //
+    //   | A | x = | b |
+    //   | D |     | 0 |
+    //
+    // If D is null, then it is treated as zero, and the solver returns
+    // the solution to
+    //
+    //   A x = b
+    //
+    // In either case, x is the vector that solves the following
+    // optimization problem.
+    //
+    //   arg min_x ||Ax - b||^2 + ||Dx||^2
+    //
+    // Here A is a matrix of size m x n, with full column rank. If A
+    // does not have full column rank, the results returned by the
+    // solver cannot be relied on. D, if it is not null, is an array
+    // of size n. b is an array of size m and x is an array of size n.
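+    //
+    // For example (illustration only), trust region methods typically
+    // pass the square root of their damping diagonal through D, so
+    // that ||Dx||^2 acts as a Levenberg-Marquardt style regularizer.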
+    double* D = nullptr;
+
+    // This option only makes sense for iterative solvers.
+    //
+    // In general the performance of an iterative linear solver
+    // depends on the condition number of the matrix A. For example,
+    // the number of iterations needed by the conjugate gradients
+    // algorithm grows with the square root of the condition number.
+    //
+    // One particularly useful technique for improving the
+    // conditioning of a linear system is to precondition it. In its
+    // simplest form a preconditioner is a matrix M such that instead
+    // of solving Ax = b, we solve the linear system AM^{-1} y = b,
+    // where M is chosen so that the condition number k(AM^{-1}) is
+    // smaller than the condition number k(A). Given the solution to
+    // this system, x = M^{-1} y. The iterative solver takes care of
+    // the mechanics of solving the preconditioned system and
+    // returning the corrected solution x. The user only needs to
+    // supply a linear operator.
+    //
+    // A null preconditioner is equivalent to using the identity
+    // matrix as the preconditioner.
+    LinearOperator* preconditioner = nullptr;
+
+    // The following tolerance-related options only make sense for
+    // iterative solvers. Direct solvers ignore them.
+
+    // Solver terminates when
+    //
+    //   |Ax - b| <= r_tolerance * |b|.
+    //
+    // This is the most commonly used termination criterion for
+    // iterative solvers.
+    double r_tolerance = 0.0;
+
+    // For PSD matrices A, let
+    //
+    //   Q(x) = x'Ax - 2b'x
+    //
+    // be the cost of the quadratic function defined by A and b. Then,
+    // the solver terminates at iteration i if
+    //
+    //   i * (Q(x_i) - Q(x_i-1)) / Q(x_i) < q_tolerance.
+    //
+    // This termination criterion is more useful when using CG to
+    // solve the Newton step. This particular convergence test comes
+    // from Stephen Nash's work on truncated Newton
+    // methods. References:
+    //
+    //   1. Stephen G. Nash & Ariela Sofer, Assessing A Search
+    //      Direction Within A Truncated Newton Method, Operations
+    //      Research Letters, 9 (1990), 219-221.
+    //
+    //   2. Stephen G. Nash, A Survey of Truncated Newton Methods,
+    //      Journal of Computational and Applied Mathematics,
+    //      124(1-2), 45-59, 2000.
+    //
+    double q_tolerance = 0.0;
+  };
+
+  // Summary of a call to the Solve method. We should move away from
+  // the true/false method for determining solver success. We should
+  // let the summary object do the talking.
+  struct Summary {
+    double residual_norm = -1.0;
+    int num_iterations = -1;
+    LinearSolverTerminationType termination_type = LINEAR_SOLVER_FAILURE;
+    std::string message;
+  };
+
+  // If the optimization problem is such that there are no remaining
+  // e-blocks, a Schur type linear solver cannot be used. If the given
+  // linear solver is of Schur type, this function implements a policy
+  // to select the nearest non-Schur alternative to the solver chosen
+  // by the user. Otherwise the input linear_solver_type is returned
+  // unchanged.
+  static LinearSolverType LinearSolverForZeroEBlocks(
+      LinearSolverType linear_solver_type);
+
+  virtual ~LinearSolver();
+
+  // Solve Ax = b.
+  virtual Summary Solve(LinearOperator* A,
+                        const double* b,
+                        const PerSolveOptions& per_solve_options,
+                        double* x) = 0;
+
+  // This method returns copies instead of references so that the base
+  // class implementation does not have to worry about lifetime
+  // issues. Further, these calls are not expected to be frequent or
+  // performance sensitive.
+  virtual std::map<std::string, CallStatistics> Statistics() const {
+    return std::map<std::string, CallStatistics>();
+  }
+
+  // Factory
+  static LinearSolver* Create(const Options& options);
+};
+
+// This templated subclass of LinearSolver serves as a base class for
+// other linear solvers that depend on the particular matrix layout of
+// the underlying linear operator. For example some linear solvers
+// need low level access to the TripletSparseMatrix implementing the
+// LinearOperator interface. This class hides those implementation
+// details behind a private virtual method, and has the Solve method
+// perform the necessary downcasting.
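+//
+// A concrete solver typically only implements SolveImpl. A minimal
+// sketch (MySolver is hypothetical and not part of Ceres):
+//
+//   class MySolver : public TypedLinearSolver<DenseSparseMatrix> {
+//    private:
+//     LinearSolver::Summary SolveImpl(
+//         DenseSparseMatrix* A,
+//         const double* b,
+//         const LinearSolver::PerSolveOptions& per_solve_options,
+//         double* x) override {
+//       LinearSolver::Summary summary;
+//       // ... factor A and back substitute into x ...
+//       summary.termination_type = LINEAR_SOLVER_SUCCESS;
+//       return summary;
+//     }
+//   };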
+template <typename MatrixType>
+class TypedLinearSolver : public LinearSolver {
+ public:
+  virtual ~TypedLinearSolver() {}
+  virtual LinearSolver::Summary Solve(
+      LinearOperator* A,
+      const double* b,
+      const LinearSolver::PerSolveOptions& per_solve_options,
+      double* x) {
+    ScopedExecutionTimer total_time("LinearSolver::Solve", &execution_summary_);
+    CHECK(A != nullptr);
+    CHECK(b != nullptr);
+    CHECK(x != nullptr);
+    return SolveImpl(down_cast<MatrixType*>(A), b, per_solve_options, x);
+  }
+
+  virtual std::map<std::string, CallStatistics> Statistics() const {
+    return execution_summary_.statistics();
+  }
+
+ private:
+  virtual LinearSolver::Summary SolveImpl(
+      MatrixType* A,
+      const double* b,
+      const LinearSolver::PerSolveOptions& per_solve_options,
+      double* x) = 0;
+
+  ExecutionSummary execution_summary_;
+};
+
+// Linear solvers that depend on access to the low level structure of
+// a SparseMatrix.
+typedef TypedLinearSolver<BlockSparseMatrix>         BlockSparseMatrixSolver;          // NOLINT
+typedef TypedLinearSolver<CompressedRowSparseMatrix> CompressedRowSparseMatrixSolver;  // NOLINT
+typedef TypedLinearSolver<DenseSparseMatrix>         DenseSparseMatrixSolver;          // NOLINT
+typedef TypedLinearSolver<TripletSparseMatrix>       TripletSparseMatrixSolver;        // NOLINT
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_LINEAR_SOLVER_H_
diff --git a/internal/ceres/local_parameterization.cc b/internal/ceres/local_parameterization.cc
new file mode 100644
index 0000000..a7fe4a1
--- /dev/null
+++ b/internal/ceres/local_parameterization.cc
@@ -0,0 +1,384 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/local_parameterization.h"
+
+#include <algorithm>
+#include "Eigen/Geometry"
+#include "ceres/householder_vector.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/fixed_array.h"
+#include "ceres/rotation.h"
+#include "glog/logging.h"
+
+namespace ceres {
+
+using std::vector;
+
+LocalParameterization::~LocalParameterization() {
+}
+
+bool LocalParameterization::MultiplyByJacobian(const double* x,
+                                               const int num_rows,
+                                               const double* global_matrix,
+                                               double* local_matrix) const {
+  if (LocalSize() == 0) {
+    return true;
+  }
+
+  Matrix jacobian(GlobalSize(), LocalSize());
+  if (!ComputeJacobian(x, jacobian.data())) {
+    return false;
+  }
+
+  MatrixRef(local_matrix, num_rows, LocalSize()) =
+      ConstMatrixRef(global_matrix, num_rows, GlobalSize()) * jacobian;
+  return true;
+}
+
+IdentityParameterization::IdentityParameterization(const int size)
+    : size_(size) {
+  CHECK_GT(size, 0);
+}
+
+bool IdentityParameterization::Plus(const double* x,
+                                    const double* delta,
+                                    double* x_plus_delta) const {
+  VectorRef(x_plus_delta, size_) =
+      ConstVectorRef(x, size_) + ConstVectorRef(delta, size_);
+  return true;
+}
+
+bool IdentityParameterization::ComputeJacobian(const double* x,
+                                               double* jacobian) const {
+  MatrixRef(jacobian, size_, size_).setIdentity();
+  return true;
+}
+
+bool IdentityParameterization::MultiplyByJacobian(const double* x,
+                                                  const int num_rows,
+                                                  const double* global_matrix,
+                                                  double* local_matrix) const {
+  std::copy(global_matrix,
+            global_matrix + num_rows * GlobalSize(),
+            local_matrix);
+  return true;
+}
+
+SubsetParameterization::SubsetParameterization(
+    int size, const vector<int>& constant_parameters)
+    : local_size_(size - constant_parameters.size()), constancy_mask_(size, 0) {
+  vector<int> constant = constant_parameters;
+  if (constant.empty()) {
+    return;
+  }
+  std::sort(constant.begin(), constant.end());
+  CHECK_GE(constant.front(), 0)
+      << "Indices indicating constant parameters must be greater than or "
+      << "equal to zero.";
+  CHECK_LT(constant.back(), size)
+      << "Indices indicating constant parameters must be less than the size "
+      << "of the parameter block.";
+  CHECK(std::adjacent_find(constant.begin(), constant.end()) == constant.end())
+      << "The set of constant parameters cannot contain duplicates";
+  for (int i = 0; i < constant_parameters.size(); ++i) {
+    constancy_mask_[constant_parameters[i]] = 1;
+  }
+}
+
+bool SubsetParameterization::Plus(const double* x,
+                                  const double* delta,
+                                  double* x_plus_delta) const {
+  const int global_size = GlobalSize();
+  for (int i = 0, j = 0; i < global_size; ++i) {
+    if (constancy_mask_[i]) {
+      x_plus_delta[i] = x[i];
+    } else {
+      x_plus_delta[i] = x[i] + delta[j++];
+    }
+  }
+  return true;
+}
+
+bool SubsetParameterization::ComputeJacobian(const double* x,
+                                             double* jacobian) const {
+  if (local_size_ == 0) {
+    return true;
+  }
+
+  const int global_size = GlobalSize();
+  MatrixRef m(jacobian, global_size, local_size_);
+  m.setZero();
+  for (int i = 0, j = 0; i < global_size; ++i) {
+    if (!constancy_mask_[i]) {
+      m(i, j++) = 1.0;
+    }
+  }
+  return true;
+}
+
+bool SubsetParameterization::MultiplyByJacobian(const double* x,
+                                                const int num_rows,
+                                                const double* global_matrix,
+                                                double* local_matrix) const {
+  if (local_size_ == 0) {
+    return true;
+  }
+
+  const int global_size = GlobalSize();
+  for (int row = 0; row < num_rows; ++row) {
+    for (int col = 0, j = 0; col < global_size; ++col) {
+      if (!constancy_mask_[col]) {
+        local_matrix[row * local_size_ + j++] =
+            global_matrix[row * global_size + col];
+      }
+    }
+  }
+  return true;
+}
+
+bool QuaternionParameterization::Plus(const double* x,
+                                      const double* delta,
+                                      double* x_plus_delta) const {
+  const double norm_delta =
+      sqrt(delta[0] * delta[0] + delta[1] * delta[1] + delta[2] * delta[2]);
+  if (norm_delta > 0.0) {
+    const double sin_delta_by_delta = (sin(norm_delta) / norm_delta);
+    double q_delta[4];
+    q_delta[0] = cos(norm_delta);
+    q_delta[1] = sin_delta_by_delta * delta[0];
+    q_delta[2] = sin_delta_by_delta * delta[1];
+    q_delta[3] = sin_delta_by_delta * delta[2];
+    QuaternionProduct(q_delta, x, x_plus_delta);
+  } else {
+    for (int i = 0; i < 4; ++i) {
+      x_plus_delta[i] = x[i];
+    }
+  }
+  return true;
+}
+
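+// The computed Jacobian is the row-major GlobalSize() x LocalSize()
+// (4 x 3) matrix d Plus(x, delta) / d delta evaluated at delta = 0.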
+bool QuaternionParameterization::ComputeJacobian(const double* x,
+                                                 double* jacobian) const {
+  jacobian[0] = -x[1]; jacobian[1]  = -x[2]; jacobian[2]  = -x[3];  // NOLINT
+  jacobian[3] =  x[0]; jacobian[4]  =  x[3]; jacobian[5]  = -x[2];  // NOLINT
+  jacobian[6] = -x[3]; jacobian[7]  =  x[0]; jacobian[8]  =  x[1];  // NOLINT
+  jacobian[9] =  x[2]; jacobian[10] = -x[1]; jacobian[11] =  x[0];  // NOLINT
+  return true;
+}
+
+bool EigenQuaternionParameterization::Plus(const double* x_ptr,
+                                           const double* delta,
+                                           double* x_plus_delta_ptr) const {
+  Eigen::Map<Eigen::Quaterniond> x_plus_delta(x_plus_delta_ptr);
+  Eigen::Map<const Eigen::Quaterniond> x(x_ptr);
+
+  const double norm_delta =
+      sqrt(delta[0] * delta[0] + delta[1] * delta[1] + delta[2] * delta[2]);
+  if (norm_delta > 0.0) {
+    const double sin_delta_by_delta = sin(norm_delta) / norm_delta;
+
+    // Note, in the constructor w is first.
+    Eigen::Quaterniond delta_q(cos(norm_delta),
+                               sin_delta_by_delta * delta[0],
+                               sin_delta_by_delta * delta[1],
+                               sin_delta_by_delta * delta[2]);
+    x_plus_delta = delta_q * x;
+  } else {
+    x_plus_delta = x;
+  }
+
+  return true;
+}
+
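+// Note: Eigen stores quaternion coefficients in (x, y, z, w) order rather
+// than (w, x, y, z), which is why this Jacobian is a row permutation of
+// the one in QuaternionParameterization::ComputeJacobian above.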
+bool EigenQuaternionParameterization::ComputeJacobian(const double* x,
+                                                      double* jacobian) const {
+  jacobian[0] =  x[3]; jacobian[1]  =  x[2]; jacobian[2]  = -x[1];  // NOLINT
+  jacobian[3] = -x[2]; jacobian[4]  =  x[3]; jacobian[5]  =  x[0];  // NOLINT
+  jacobian[6] =  x[1]; jacobian[7]  = -x[0]; jacobian[8]  =  x[3];  // NOLINT
+  jacobian[9] = -x[0]; jacobian[10] = -x[1]; jacobian[11] = -x[2];  // NOLINT
+  return true;
+}
+
+HomogeneousVectorParameterization::HomogeneousVectorParameterization(int size)
+    : size_(size) {
+  CHECK_GT(size_, 1) << "The size of the homogeneous vector needs to be "
+                     << "greater than 1.";
+}
+
+bool HomogeneousVectorParameterization::Plus(const double* x_ptr,
+                                             const double* delta_ptr,
+                                             double* x_plus_delta_ptr) const {
+  ConstVectorRef x(x_ptr, size_);
+  ConstVectorRef delta(delta_ptr, size_ - 1);
+  VectorRef x_plus_delta(x_plus_delta_ptr, size_);
+
+  const double norm_delta = delta.norm();
+
+  if (norm_delta == 0.0) {
+    x_plus_delta = x;
+    return true;
+  }
+
+  // Map the delta from the minimum representation to the over parameterized
+  // homogeneous vector. See section A6.9.2 on page 624 of Hartley & Zisserman
+  // (2nd Edition) for a detailed description.  Note there is a typo on Page
+  // 625, line 4 so check the book errata.
+  const double norm_delta_div_2 = 0.5 * norm_delta;
+  const double sin_delta_by_delta = sin(norm_delta_div_2) /
+      norm_delta_div_2;
+
+  Vector y(size_);
+  y.head(size_ - 1) = 0.5 * sin_delta_by_delta * delta;
+  y(size_ - 1) = cos(norm_delta_div_2);
+
+  Vector v(size_);
+  double beta;
+  internal::ComputeHouseholderVector<double>(x, &v, &beta);
+
+  // Apply the delta update to remain on the unit sphere. See section A6.9.3
+  // on page 625 of Hartley & Zisserman (2nd Edition) for a detailed
+  // description.
+  x_plus_delta = x.norm() * (y -  v * (beta * (v.transpose() * y)));
+
+  return true;
+}
+
+bool HomogeneousVectorParameterization::ComputeJacobian(
+    const double* x_ptr, double* jacobian_ptr) const {
+  ConstVectorRef x(x_ptr, size_);
+  MatrixRef jacobian(jacobian_ptr, size_, size_ - 1);
+
+  Vector v(size_);
+  double beta;
+  internal::ComputeHouseholderVector<double>(x, &v, &beta);
+
+  // The Jacobian is equal to J = 0.5 * H.leftCols(size_ - 1) where H is the
+  // Householder matrix (H = I - beta * v * v').
+  for (int i = 0; i < size_ - 1; ++i) {
+    jacobian.col(i) = -0.5 * beta * v(i) * v;
+    jacobian.col(i)(i) += 0.5;
+  }
+  jacobian *= x.norm();
+
+  return true;
+}
+
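+// A common use of ProductParameterization (sketch only) is to combine
+// simpler parameterizations for a composite parameter block, e.g. a
+// rigid transform stored as [quaternion, translation]:
+//
+//   LocalParameterization* se3_param = new ProductParameterization(
+//       new QuaternionParameterization(), new IdentityParameterization(3));
+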
+ProductParameterization::ProductParameterization(
+    LocalParameterization* local_param1,
+    LocalParameterization* local_param2) {
+  local_params_.push_back(local_param1);
+  local_params_.push_back(local_param2);
+  Init();
+}
+
+ProductParameterization::ProductParameterization(
+    LocalParameterization* local_param1,
+    LocalParameterization* local_param2,
+    LocalParameterization* local_param3) {
+  local_params_.push_back(local_param1);
+  local_params_.push_back(local_param2);
+  local_params_.push_back(local_param3);
+  Init();
+}
+
+ProductParameterization::ProductParameterization(
+    LocalParameterization* local_param1,
+    LocalParameterization* local_param2,
+    LocalParameterization* local_param3,
+    LocalParameterization* local_param4) {
+  local_params_.push_back(local_param1);
+  local_params_.push_back(local_param2);
+  local_params_.push_back(local_param3);
+  local_params_.push_back(local_param4);
+  Init();
+}
+
+ProductParameterization::~ProductParameterization() {
+  for (int i = 0; i < local_params_.size(); ++i) {
+    delete local_params_[i];
+  }
+}
+
+void ProductParameterization::Init() {
+  global_size_ = 0;
+  local_size_ = 0;
+  buffer_size_ = 0;
+  for (int i = 0; i < local_params_.size(); ++i) {
+    const LocalParameterization* param = local_params_[i];
+    buffer_size_ = std::max(buffer_size_,
+                            param->LocalSize() * param->GlobalSize());
+    global_size_ += param->GlobalSize();
+    local_size_ += param->LocalSize();
+  }
+}
+
+bool ProductParameterization::Plus(const double* x,
+                                   const double* delta,
+                                   double* x_plus_delta) const {
+  int x_cursor = 0;
+  int delta_cursor = 0;
+  for (int i = 0; i < local_params_.size(); ++i) {
+    const LocalParameterization* param = local_params_[i];
+    if (!param->Plus(x + x_cursor,
+                     delta + delta_cursor,
+                     x_plus_delta + x_cursor)) {
+      return false;
+    }
+    delta_cursor += param->LocalSize();
+    x_cursor += param->GlobalSize();
+  }
+
+  return true;
+}
+
+bool ProductParameterization::ComputeJacobian(const double* x,
+                                              double* jacobian_ptr) const {
+  MatrixRef jacobian(jacobian_ptr, GlobalSize(), LocalSize());
+  jacobian.setZero();
+  internal::FixedArray<double> buffer(buffer_size_);
+
+  int x_cursor = 0;
+  int delta_cursor = 0;
+  for (int i = 0; i < local_params_.size(); ++i) {
+    const LocalParameterization* param = local_params_[i];
+    const int local_size = param->LocalSize();
+    const int global_size = param->GlobalSize();
+
+    if (!param->ComputeJacobian(x + x_cursor, buffer.get())) {
+      return false;
+    }
+    jacobian.block(x_cursor, delta_cursor, global_size, local_size)
+        = MatrixRef(buffer.get(), global_size, local_size);
+
+    delta_cursor += local_size;
+    x_cursor += global_size;
+  }
+
+  return true;
+}
+
+}  // namespace ceres
diff --git a/internal/ceres/local_parameterization_test.cc b/internal/ceres/local_parameterization_test.cc
new file mode 100644
index 0000000..18b7e8c
--- /dev/null
+++ b/internal/ceres/local_parameterization_test.cc
@@ -0,0 +1,774 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <cmath>
+#include <limits>
+#include <memory>
+
+#include "Eigen/Geometry"
+#include "ceres/autodiff_local_parameterization.h"
+#include "ceres/householder_vector.h"
+#include "ceres/internal/autodiff.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/local_parameterization.h"
+#include "ceres/random.h"
+#include "ceres/rotation.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+TEST(IdentityParameterization, EverythingTest) {
+  IdentityParameterization parameterization(3);
+  EXPECT_EQ(parameterization.GlobalSize(), 3);
+  EXPECT_EQ(parameterization.LocalSize(), 3);
+
+  double x[3] = {1.0, 2.0, 3.0};
+  double delta[3] = {0.0, 1.0, 2.0};
+  double x_plus_delta[3] = {0.0, 0.0, 0.0};
+  parameterization.Plus(x, delta, x_plus_delta);
+  EXPECT_EQ(x_plus_delta[0], 1.0);
+  EXPECT_EQ(x_plus_delta[1], 3.0);
+  EXPECT_EQ(x_plus_delta[2], 5.0);
+
+  double jacobian[9];
+  parameterization.ComputeJacobian(x, jacobian);
+  int k = 0;
+  for (int i = 0; i < 3; ++i) {
+    for (int j = 0; j < 3; ++j, ++k) {
+      EXPECT_EQ(jacobian[k], (i == j) ? 1.0 : 0.0);
+    }
+  }
+
+  Matrix global_matrix = Matrix::Ones(10, 3);
+  Matrix local_matrix = Matrix::Zero(10, 3);
+  parameterization.MultiplyByJacobian(x,
+                                      10,
+                                      global_matrix.data(),
+                                      local_matrix.data());
+  EXPECT_EQ((local_matrix - global_matrix).norm(), 0.0);
+}
+
+
+TEST(SubsetParameterization, NegativeParameterIndexDeathTest) {
+  std::vector<int> constant_parameters;
+  constant_parameters.push_back(-1);
+  EXPECT_DEATH_IF_SUPPORTED(
+      SubsetParameterization parameterization(2, constant_parameters),
+      "greater than equal to zero");
+}
+
+TEST(SubsetParameterization, GreaterThanSizeParameterIndexDeathTest) {
+  std::vector<int> constant_parameters;
+  constant_parameters.push_back(2);
+  EXPECT_DEATH_IF_SUPPORTED(
+      SubsetParameterization parameterization(2, constant_parameters),
+      "less than the size");
+}
+
+TEST(SubsetParameterization, DuplicateParametersDeathTest) {
+  std::vector<int> constant_parameters;
+  constant_parameters.push_back(1);
+  constant_parameters.push_back(1);
+  EXPECT_DEATH_IF_SUPPORTED(
+      SubsetParameterization parameterization(2, constant_parameters),
+      "duplicates");
+}
+
+TEST(SubsetParameterization,
+     ProductParameterizationWithZeroLocalSizeSubsetParameterization1) {
+  std::vector<int> constant_parameters;
+  constant_parameters.push_back(0);
+  LocalParameterization* subset_param =
+      new SubsetParameterization(1, constant_parameters);
+  LocalParameterization* identity_param = new IdentityParameterization(2);
+  ProductParameterization product_param(subset_param, identity_param);
+  EXPECT_EQ(product_param.GlobalSize(), 3);
+  EXPECT_EQ(product_param.LocalSize(), 2);
+  double x[] = {1.0, 1.0, 1.0};
+  double delta[] = {2.0, 3.0};
+  double x_plus_delta[] = {0.0, 0.0, 0.0};
+  EXPECT_TRUE(product_param.Plus(x, delta, x_plus_delta));
+  EXPECT_EQ(x_plus_delta[0], x[0]);
+  EXPECT_EQ(x_plus_delta[1], x[1] + delta[0]);
+  EXPECT_EQ(x_plus_delta[2], x[2] + delta[1]);
+
+  Matrix actual_jacobian(3, 2);
+  EXPECT_TRUE(product_param.ComputeJacobian(x, actual_jacobian.data()));
+}
+
+TEST(SubsetParameterization,
+     ProductParameterizationWithZeroLocalSizeSubsetParameterization2) {
+  std::vector<int> constant_parameters;
+  constant_parameters.push_back(0);
+  LocalParameterization* subset_param =
+      new SubsetParameterization(1, constant_parameters);
+  LocalParameterization* identity_param = new IdentityParameterization(2);
+  ProductParameterization product_param(identity_param, subset_param);
+  EXPECT_EQ(product_param.GlobalSize(), 3);
+  EXPECT_EQ(product_param.LocalSize(), 2);
+  double x[] = {1.0, 1.0, 1.0};
+  double delta[] = {2.0, 3.0};
+  double x_plus_delta[] = {0.0, 0.0, 0.0};
+  EXPECT_TRUE(product_param.Plus(x, delta, x_plus_delta));
+  EXPECT_EQ(x_plus_delta[0], x[0] + delta[0]);
+  EXPECT_EQ(x_plus_delta[1], x[1] + delta[1]);
+  EXPECT_EQ(x_plus_delta[2], x[2]);
+
+  Matrix actual_jacobian(3, 2);
+  EXPECT_TRUE(product_param.ComputeJacobian(x, actual_jacobian.data()));
+}
+
+TEST(SubsetParameterization, NormalFunctionTest) {
+  const int kGlobalSize = 4;
+  const int kLocalSize = 3;
+
+  double x[kGlobalSize] = {1.0, 2.0, 3.0, 4.0};
+  for (int i = 0; i < kGlobalSize; ++i) {
+    std::vector<int> constant_parameters;
+    constant_parameters.push_back(i);
+    SubsetParameterization parameterization(kGlobalSize, constant_parameters);
+    double delta[kLocalSize] = {1.0, 2.0, 3.0};
+    double x_plus_delta[kGlobalSize] = {0.0, 0.0, 0.0};
+
+    parameterization.Plus(x, delta, x_plus_delta);
+    int k = 0;
+    for (int j = 0; j < kGlobalSize; ++j) {
+      if (j == i)  {
+        EXPECT_EQ(x_plus_delta[j], x[j]);
+      } else {
+        EXPECT_EQ(x_plus_delta[j], x[j] + delta[k++]);
+      }
+    }
+
+    double jacobian[kGlobalSize * kLocalSize];
+    parameterization.ComputeJacobian(x, jacobian);
+    int delta_cursor = 0;
+    int jacobian_cursor = 0;
+    for (int j = 0; j < kGlobalSize; ++j) {
+      if (j != i) {
+        for (int k = 0; k < kLocalSize; ++k, jacobian_cursor++) {
+          EXPECT_EQ(jacobian[jacobian_cursor], delta_cursor == k ? 1.0 : 0.0);
+        }
+        ++delta_cursor;
+      } else {
+        for (int k = 0; k < kLocalSize; ++k, jacobian_cursor++) {
+          EXPECT_EQ(jacobian[jacobian_cursor], 0.0);
+        }
+      }
+    }
+
+    Matrix global_matrix = Matrix::Ones(10, kGlobalSize);
+    for (int row = 0; row < kGlobalSize; ++row) {
+      for (int col = 0; col < kGlobalSize; ++col) {
+        global_matrix(row, col) = col;
+      }
+    }
+
+    Matrix local_matrix = Matrix::Zero(10, kLocalSize);
+    parameterization.MultiplyByJacobian(x,
+                                        10,
+                                        global_matrix.data(),
+                                        local_matrix.data());
+    Matrix expected_local_matrix =
+        global_matrix * MatrixRef(jacobian, kGlobalSize, kLocalSize);
+    EXPECT_EQ((local_matrix - expected_local_matrix).norm(), 0.0);
+  }
+}
+
+// Functor needed to implement automatically differentiated Plus for
+// quaternions.
+struct QuaternionPlus {
+  template<typename T>
+  bool operator()(const T* x, const T* delta, T* x_plus_delta) const {
+    const T squared_norm_delta =
+        delta[0] * delta[0] + delta[1] * delta[1] + delta[2] * delta[2];
+
+    T q_delta[4];
+    if (squared_norm_delta > T(0.0)) {
+      T norm_delta = sqrt(squared_norm_delta);
+      const T sin_delta_by_delta = sin(norm_delta) / norm_delta;
+      q_delta[0] = cos(norm_delta);
+      q_delta[1] = sin_delta_by_delta * delta[0];
+      q_delta[2] = sin_delta_by_delta * delta[1];
+      q_delta[3] = sin_delta_by_delta * delta[2];
+    } else {
+      // We do not just use q_delta = [1,0,0,0] here because that is a
+      // constant and when used for automatic differentiation will
+      // lead to a zero derivative. Instead we take a first order
+      // approximation and evaluate it at zero.
+      q_delta[0] = T(1.0);
+      q_delta[1] = delta[0];
+      q_delta[2] = delta[1];
+      q_delta[3] = delta[2];
+    }
+
+    QuaternionProduct(q_delta, x, x_plus_delta);
+    return true;
+  }
+};
+
+template<typename Parameterization, typename Plus>
+void QuaternionParameterizationTestHelper(
+    const double* x, const double* delta,
+    const double* x_plus_delta_ref) {
+  const int kGlobalSize = 4;
+  const int kLocalSize = 3;
+
+  const double kTolerance = 1e-14;
+
+  double x_plus_delta[kGlobalSize] = {0.0, 0.0, 0.0, 0.0};
+  Parameterization parameterization;
+  parameterization.Plus(x, delta, x_plus_delta);
+  for (int i = 0; i < kGlobalSize; ++i) {
+    EXPECT_NEAR(x_plus_delta[i], x_plus_delta_ref[i], kTolerance);
+  }
+
+  const double x_plus_delta_norm =
+      sqrt(x_plus_delta[0] * x_plus_delta[0] +
+           x_plus_delta[1] * x_plus_delta[1] +
+           x_plus_delta[2] * x_plus_delta[2] +
+           x_plus_delta[3] * x_plus_delta[3]);
+
+  EXPECT_NEAR(x_plus_delta_norm, 1.0, kTolerance);
+
+  double jacobian_ref[12];
+  double zero_delta[kLocalSize] = {0.0, 0.0, 0.0};
+  const double* parameters[2] = {x, zero_delta};
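+  // Only the Jacobian with respect to delta is needed, so the slot for
+  // the Jacobian with respect to x is left as NULL.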
+  double* jacobian_array[2] = { NULL, jacobian_ref };
+
+  // Autodiff jacobian at delta_x = 0.
+  internal::AutoDifferentiate<StaticParameterDims<kGlobalSize, kLocalSize>>(
+      Plus(),
+      parameters,
+      kGlobalSize,
+      x_plus_delta,
+      jacobian_array);
+
+  double jacobian[12];
+  parameterization.ComputeJacobian(x, jacobian);
+  for (int i = 0; i < 12; ++i) {
+    EXPECT_TRUE(IsFinite(jacobian[i]));
+    EXPECT_NEAR(jacobian[i], jacobian_ref[i], kTolerance)
+        << "Jacobian mismatch: i = " << i
+        << "\n Expected \n"
+        << ConstMatrixRef(jacobian_ref, kGlobalSize, kLocalSize)
+        << "\n Actual \n"
+        << ConstMatrixRef(jacobian, kGlobalSize, kLocalSize);
+  }
+
+  Matrix global_matrix = Matrix::Random(10, kGlobalSize);
+  Matrix local_matrix = Matrix::Zero(10, kLocalSize);
+  parameterization.MultiplyByJacobian(x,
+                                      10,
+                                      global_matrix.data(),
+                                      local_matrix.data());
+  Matrix expected_local_matrix =
+      global_matrix * MatrixRef(jacobian, kGlobalSize, kLocalSize);
+  EXPECT_NEAR((local_matrix - expected_local_matrix).norm(),
+              0.0,
+              10.0 * std::numeric_limits<double>::epsilon());
+}
+
+template <int N>
+void Normalize(double* x) {
+  VectorRef(x, N).normalize();
+}
+
+TEST(QuaternionParameterization, ZeroTest) {
+  double x[4] = {0.5, 0.5, 0.5, 0.5};
+  double delta[3] = {0.0, 0.0, 0.0};
+  double q_delta[4] = {1.0, 0.0, 0.0, 0.0};
+  double x_plus_delta[4] = {0.0, 0.0, 0.0, 0.0};
+  QuaternionProduct(q_delta, x, x_plus_delta);
+  QuaternionParameterizationTestHelper<QuaternionParameterization,
+                                       QuaternionPlus>(x, delta, x_plus_delta);
+}
+
+TEST(QuaternionParameterization, NearZeroTest) {
+  double x[4] = {0.52, 0.25, 0.15, 0.45};
+  Normalize<4>(x);
+
+  double delta[3] = {0.24, 0.15, 0.10};
+  for (int i = 0; i < 3; ++i) {
+    delta[i] = delta[i] * 1e-14;
+  }
+
+  double q_delta[4];
+  q_delta[0] = 1.0;
+  q_delta[1] = delta[0];
+  q_delta[2] = delta[1];
+  q_delta[3] = delta[2];
+
+  double x_plus_delta[4] = {0.0, 0.0, 0.0, 0.0};
+  QuaternionProduct(q_delta, x, x_plus_delta);
+  QuaternionParameterizationTestHelper<QuaternionParameterization,
+                                       QuaternionPlus>(x, delta, x_plus_delta);
+}
+
+TEST(QuaternionParameterization, AwayFromZeroTest) {
+  double x[4] = {0.52, 0.25, 0.15, 0.45};
+  Normalize<4>(x);
+
+  double delta[3] = {0.24, 0.15, 0.10};
+  const double delta_norm = sqrt(delta[0] * delta[0] +
+                                 delta[1] * delta[1] +
+                                 delta[2] * delta[2]);
+  double q_delta[4];
+  q_delta[0] = cos(delta_norm);
+  q_delta[1] = sin(delta_norm) / delta_norm * delta[0];
+  q_delta[2] = sin(delta_norm) / delta_norm * delta[1];
+  q_delta[3] = sin(delta_norm) / delta_norm * delta[2];
+
+  double x_plus_delta[4] = {0.0, 0.0, 0.0, 0.0};
+  QuaternionProduct(q_delta, x, x_plus_delta);
+  QuaternionParameterizationTestHelper<QuaternionParameterization,
+                                       QuaternionPlus>(x, delta, x_plus_delta);
+}
+
+// Functor needed to implement automatically differentiated Plus for
+// Eigen's quaternion.
+struct EigenQuaternionPlus {
+  template<typename T>
+  bool operator()(const T* x, const T* delta, T* x_plus_delta) const {
+    const T norm_delta =
+        sqrt(delta[0] * delta[0] + delta[1] * delta[1] + delta[2] * delta[2]);
+
+    Eigen::Quaternion<T> q_delta;
+    if (norm_delta > T(0.0)) {
+      const T sin_delta_by_delta = sin(norm_delta) / norm_delta;
+      q_delta.coeffs() << sin_delta_by_delta * delta[0],
+          sin_delta_by_delta * delta[1], sin_delta_by_delta * delta[2],
+          cos(norm_delta);
+    } else {
+      // We do not just use q_delta = [0,0,0,1] here because that is a
+      // constant and when used for automatic differentiation will
+      // lead to a zero derivative. Instead we take a first order
+      // approximation and evaluate it at zero.
+      q_delta.coeffs() <<  delta[0], delta[1], delta[2], T(1.0);
+    }
+
+    Eigen::Map<Eigen::Quaternion<T>> x_plus_delta_ref(x_plus_delta);
+    Eigen::Map<const Eigen::Quaternion<T>> x_ref(x);
+    x_plus_delta_ref = q_delta * x_ref;
+    return true;
+  }
+};
+
+TEST(EigenQuaternionParameterization, ZeroTest) {
+  Eigen::Quaterniond x(0.5, 0.5, 0.5, 0.5);
+  double delta[3] = {0.0, 0.0, 0.0};
+  Eigen::Quaterniond q_delta(1.0, 0.0, 0.0, 0.0);
+  Eigen::Quaterniond x_plus_delta = q_delta * x;
+  QuaternionParameterizationTestHelper<EigenQuaternionParameterization,
+                                       EigenQuaternionPlus>(
+      x.coeffs().data(), delta, x_plus_delta.coeffs().data());
+}
+
+TEST(EigenQuaternionParameterization, NearZeroTest) {
+  Eigen::Quaterniond x(0.52, 0.25, 0.15, 0.45);
+  x.normalize();
+
+  double delta[3] = {0.24, 0.15, 0.10};
+  for (int i = 0; i < 3; ++i) {
+    delta[i] = delta[i] * 1e-14;
+  }
+
+  // Note: w is first in the constructor.
+  Eigen::Quaterniond q_delta(1.0, delta[0], delta[1], delta[2]);
+
+  Eigen::Quaterniond x_plus_delta = q_delta * x;
+  QuaternionParameterizationTestHelper<EigenQuaternionParameterization,
+                                       EigenQuaternionPlus>(
+      x.coeffs().data(), delta, x_plus_delta.coeffs().data());
+}
+
+TEST(EigenQuaternionParameterization, AwayFromZeroTest) {
+  Eigen::Quaterniond x(0.52, 0.25, 0.15, 0.45);
+  x.normalize();
+
+  double delta[3] = {0.24, 0.15, 0.10};
+  const double delta_norm = sqrt(delta[0] * delta[0] +
+                                 delta[1] * delta[1] +
+                                 delta[2] * delta[2]);
+
+  // Note: w is first in the constructor.
+  Eigen::Quaterniond q_delta(cos(delta_norm),
+                             sin(delta_norm) / delta_norm * delta[0],
+                             sin(delta_norm) / delta_norm * delta[1],
+                             sin(delta_norm) / delta_norm * delta[2]);
+
+  Eigen::Quaterniond x_plus_delta = q_delta * x;
+  QuaternionParameterizationTestHelper<EigenQuaternionParameterization,
+                                       EigenQuaternionPlus>(
+      x.coeffs().data(), delta, x_plus_delta.coeffs().data());
+}
+
+// Functor needed to implement automatically differentiated Plus for
+// homogeneous vectors. Note this explicitly defined for vectors of size 4.
+struct HomogeneousVectorParameterizationPlus {
+  template<typename Scalar>
+  bool operator()(const Scalar* p_x, const Scalar* p_delta,
+                  Scalar* p_x_plus_delta) const {
+    Eigen::Map<const Eigen::Matrix<Scalar, 4, 1>> x(p_x);
+    Eigen::Map<const Eigen::Matrix<Scalar, 3, 1>> delta(p_delta);
+    Eigen::Map<Eigen::Matrix<Scalar, 4, 1>> x_plus_delta(p_x_plus_delta);
+
+    const Scalar squared_norm_delta =
+        delta[0] * delta[0] + delta[1] * delta[1] + delta[2] * delta[2];
+
+    Eigen::Matrix<Scalar, 4, 1> y;
+    Scalar one_half(0.5);
+    if (squared_norm_delta > Scalar(0.0)) {
+      Scalar norm_delta = sqrt(squared_norm_delta);
+      Scalar norm_delta_div_2 = 0.5 * norm_delta;
+      const Scalar sin_delta_by_delta = sin(norm_delta_div_2) /
+          norm_delta_div_2;
+      y[0] = sin_delta_by_delta * delta[0] * one_half;
+      y[1] = sin_delta_by_delta * delta[1] * one_half;
+      y[2] = sin_delta_by_delta * delta[2] * one_half;
+      y[3] = cos(norm_delta_div_2);
+
+    } else {
+      // We do not just use y = [0,0,0,1] here because that is a
+      // constant and when used for automatic differentiation will
+      // lead to a zero derivative. Instead we take a first order
+      // approximation and evaluate it at zero.
+      y[0] = delta[0] * one_half;
+      y[1] = delta[1] * one_half;
+      y[2] = delta[2] * one_half;
+      y[3] = Scalar(1.0);
+    }
+
+    Eigen::Matrix<Scalar, Eigen::Dynamic, 1> v(4);
+    Scalar beta;
+    internal::ComputeHouseholderVector<Scalar>(x, &v, &beta);
+
+    x_plus_delta = x.norm() * (y - v * (beta * v.dot(y)));
+
+    return true;
+  }
+};
+
+void HomogeneousVectorParameterizationHelper(const double* x,
+                                             const double* delta) {
+  const double kTolerance = 1e-14;
+
+  HomogeneousVectorParameterization homogeneous_vector_parameterization(4);
+
+  // Ensure the update maintains the norm.
+  double x_plus_delta[4] = {0.0, 0.0, 0.0, 0.0};
+  homogeneous_vector_parameterization.Plus(x, delta, x_plus_delta);
+
+  const double x_plus_delta_norm =
+      sqrt(x_plus_delta[0] * x_plus_delta[0] +
+           x_plus_delta[1] * x_plus_delta[1] +
+           x_plus_delta[2] * x_plus_delta[2] +
+           x_plus_delta[3] * x_plus_delta[3]);
+
+  const double x_norm = sqrt(x[0] * x[0] + x[1] * x[1] +
+                             x[2] * x[2] + x[3] * x[3]);
+
+  EXPECT_NEAR(x_plus_delta_norm, x_norm, kTolerance);
+
+  // Autodiff jacobian at delta_x = 0.
+  AutoDiffLocalParameterization<HomogeneousVectorParameterizationPlus, 4, 3>
+      autodiff_jacobian;
+
+  double jacobian_autodiff[12];
+  double jacobian_analytic[12];
+
+  homogeneous_vector_parameterization.ComputeJacobian(x, jacobian_analytic);
+  autodiff_jacobian.ComputeJacobian(x, jacobian_autodiff);
+
+  for (int i = 0; i < 12; ++i) {
+    EXPECT_TRUE(ceres::IsFinite(jacobian_analytic[i]));
+    EXPECT_NEAR(jacobian_analytic[i], jacobian_autodiff[i], kTolerance)
+        << "Jacobian mismatch: i = " << i << ", " << jacobian_analytic[i] << " "
+        << jacobian_autodiff[i];
+  }
+}
+
+TEST(HomogeneousVectorParameterization, ZeroTest) {
+  double x[4] = {0.0, 0.0, 0.0, 1.0};
+  Normalize<4>(x);
+  double delta[3] = {0.0, 0.0, 0.0};
+
+  HomogeneousVectorParameterizationHelper(x, delta);
+}
+
+TEST(HomogeneousVectorParameterization, NearZeroTest1) {
+  double x[4] = {1e-5, 1e-5, 1e-5, 1.0};
+  Normalize<4>(x);
+  double delta[3] = {0.0, 1.0, 0.0};
+
+  HomogeneousVectorParameterizationHelper(x, delta);
+}
+
+TEST(HomogeneousVectorParameterization, NearZeroTest2) {
+  double x[4] = {0.001, 0.0, 0.0, 0.0};
+  double delta[3] = {0.0, 1.0, 0.0};
+
+  HomogeneousVectorParameterizationHelper(x, delta);
+}
+
+TEST(HomogeneousVectorParameterization, AwayFromZeroTest1) {
+  double x[4] = {0.52, 0.25, 0.15, 0.45};
+  Normalize<4>(x);
+  double delta[3] = {0.0, 1.0, -0.5};
+
+  HomogeneousVectorParameterizationHelper(x, delta);
+}
+
+TEST(HomogeneousVectorParameterization, AwayFromZeroTest2) {
+  double x[4] = {0.87, -0.25, -0.34, 0.45};
+  Normalize<4>(x);
+  double delta[3] = {0.0, 0.0, -0.5};
+
+  HomogeneousVectorParameterizationHelper(x, delta);
+}
+
+TEST(HomogeneousVectorParameterization, AwayFromZeroTest3) {
+  double x[4] = {0.0, 0.0, 0.0, 2.0};
+  double delta[3] = {0.0, 0.0, 0};
+
+  HomogeneousVectorParameterizationHelper(x, delta);
+}
+
+TEST(HomogeneousVectorParameterization, AwayFromZeroTest4) {
+  double x[4] = {0.2, -1.0, 0.0, 2.0};
+  double delta[3] = {1.4, 0.0, -0.5};
+
+  HomogeneousVectorParameterizationHelper(x, delta);
+}
+
+TEST(HomogeneousVectorParameterization, AwayFromZeroTest5) {
+  double x[4] = {2.0, 0.0, 0.0, 0.0};
+  double delta[3] = {1.4, 0.0, -0.5};
+
+  HomogeneousVectorParameterizationHelper(x, delta);
+}
+
+TEST(HomogeneousVectorParameterization, DeathTests) {
+  EXPECT_DEATH_IF_SUPPORTED(HomogeneousVectorParameterization x(1), "size");
+}
+
+
+class ProductParameterizationTest : public ::testing::Test {
+ protected:
+  virtual void SetUp() {
+    const int global_size1 = 5;
+    std::vector<int> constant_parameters1;
+    constant_parameters1.push_back(2);
+    param1_.reset(new SubsetParameterization(global_size1,
+                                             constant_parameters1));
+
+    const int global_size2 = 3;
+    std::vector<int> constant_parameters2;
+    constant_parameters2.push_back(0);
+    constant_parameters2.push_back(1);
+    param2_.reset(new SubsetParameterization(global_size2,
+                                             constant_parameters2));
+
+    const int global_size3 = 4;
+    std::vector<int> constant_parameters3;
+    constant_parameters3.push_back(1);
+    param3_.reset(new SubsetParameterization(global_size3,
+                                             constant_parameters3));
+
+    const int global_size4 = 2;
+    std::vector<int> constant_parameters4;
+    constant_parameters4.push_back(1);
+    param4_.reset(new SubsetParameterization(global_size4,
+                                             constant_parameters4));
+  }
+
+  std::unique_ptr<LocalParameterization> param1_;
+  std::unique_ptr<LocalParameterization> param2_;
+  std::unique_ptr<LocalParameterization> param3_;
+  std::unique_ptr<LocalParameterization> param4_;
+};
+
+TEST_F(ProductParameterizationTest, LocalAndGlobalSize2) {
+  LocalParameterization* param1 = param1_.release();
+  LocalParameterization* param2 = param2_.release();
+
+  ProductParameterization product_param(param1, param2);
+  EXPECT_EQ(product_param.LocalSize(),
+            param1->LocalSize() + param2->LocalSize());
+  EXPECT_EQ(product_param.GlobalSize(),
+            param1->GlobalSize() + param2->GlobalSize());
+}
+
+
+TEST_F(ProductParameterizationTest, LocalAndGlobalSize3) {
+  LocalParameterization* param1 = param1_.release();
+  LocalParameterization* param2 = param2_.release();
+  LocalParameterization* param3 = param3_.release();
+
+  ProductParameterization product_param(param1, param2, param3);
+  EXPECT_EQ(product_param.LocalSize(),
+            param1->LocalSize() + param2->LocalSize() + param3->LocalSize());
+  EXPECT_EQ(product_param.GlobalSize(),
+            param1->GlobalSize() + param2->GlobalSize() + param3->GlobalSize());
+}
+
+TEST_F(ProductParameterizationTest, LocalAndGlobalSize4) {
+  LocalParameterization* param1 = param1_.release();
+  LocalParameterization* param2 = param2_.release();
+  LocalParameterization* param3 = param3_.release();
+  LocalParameterization* param4 = param4_.release();
+
+  ProductParameterization product_param(param1, param2, param3, param4);
+  EXPECT_EQ(product_param.LocalSize(),
+            param1->LocalSize() +
+            param2->LocalSize() +
+            param3->LocalSize() +
+            param4->LocalSize());
+  EXPECT_EQ(product_param.GlobalSize(),
+            param1->GlobalSize() +
+            param2->GlobalSize() +
+            param3->GlobalSize() +
+            param4->GlobalSize());
+}
+
+TEST_F(ProductParameterizationTest, Plus) {
+  LocalParameterization* param1 = param1_.release();
+  LocalParameterization* param2 = param2_.release();
+  LocalParameterization* param3 = param3_.release();
+  LocalParameterization* param4 = param4_.release();
+
+  ProductParameterization product_param(param1, param2, param3, param4);
+  std::vector<double> x(product_param.GlobalSize(), 0.0);
+  std::vector<double> delta(product_param.LocalSize(), 0.0);
+  std::vector<double> x_plus_delta_expected(product_param.GlobalSize(), 0.0);
+  std::vector<double> x_plus_delta(product_param.GlobalSize(), 0.0);
+
+  for (int i = 0; i < product_param.GlobalSize(); ++i) {
+    x[i] = RandNormal();
+  }
+
+  for (int i = 0; i < product_param.LocalSize(); ++i) {
+    delta[i] = RandNormal();
+  }
+
+  EXPECT_TRUE(product_param.Plus(&x[0], &delta[0], &x_plus_delta_expected[0]));
+  int x_cursor = 0;
+  int delta_cursor = 0;
+
+  EXPECT_TRUE(param1->Plus(&x[x_cursor],
+                           &delta[delta_cursor],
+                           &x_plus_delta[x_cursor]));
+  x_cursor += param1->GlobalSize();
+  delta_cursor += param1->LocalSize();
+
+  EXPECT_TRUE(param2->Plus(&x[x_cursor],
+                           &delta[delta_cursor],
+                           &x_plus_delta[x_cursor]));
+  x_cursor += param2->GlobalSize();
+  delta_cursor += param2->LocalSize();
+
+  EXPECT_TRUE(param3->Plus(&x[x_cursor],
+                           &delta[delta_cursor],
+                           &x_plus_delta[x_cursor]));
+  x_cursor += param3->GlobalSize();
+  delta_cursor += param3->LocalSize();
+
+  EXPECT_TRUE(param4->Plus(&x[x_cursor],
+                           &delta[delta_cursor],
+                           &x_plus_delta[x_cursor]));
+  x_cursor += param4->GlobalSize();
+  delta_cursor += param4->LocalSize();
+
+  for (int i = 0; i < x.size(); ++i) {
+    EXPECT_EQ(x_plus_delta[i], x_plus_delta_expected[i]);
+  }
+}
+
+TEST_F(ProductParameterizationTest, ComputeJacobian) {
+  LocalParameterization* param1 = param1_.release();
+  LocalParameterization* param2 = param2_.release();
+  LocalParameterization* param3 = param3_.release();
+  LocalParameterization* param4 = param4_.release();
+
+  ProductParameterization product_param(param1, param2, param3, param4);
+  std::vector<double> x(product_param.GlobalSize(), 0.0);
+
+  for (int i = 0; i < product_param.GlobalSize(); ++i) {
+    x[i] = RandNormal();
+  }
+
+  Matrix jacobian = Matrix::Random(product_param.GlobalSize(),
+                                   product_param.LocalSize());
+  EXPECT_TRUE(product_param.ComputeJacobian(&x[0], jacobian.data()));
+  int x_cursor = 0;
+  int delta_cursor = 0;
+
+  Matrix jacobian1(param1->GlobalSize(), param1->LocalSize());
+  EXPECT_TRUE(param1->ComputeJacobian(&x[x_cursor], jacobian1.data()));
+  jacobian.block(x_cursor, delta_cursor,
+                 param1->GlobalSize(),
+                 param1->LocalSize())
+      -= jacobian1;
+  x_cursor += param1->GlobalSize();
+  delta_cursor += param1->LocalSize();
+
+  Matrix jacobian2(param2->GlobalSize(), param2->LocalSize());
+  EXPECT_TRUE(param2->ComputeJacobian(&x[x_cursor], jacobian2.data()));
+  jacobian.block(x_cursor, delta_cursor,
+                 param2->GlobalSize(),
+                 param2->LocalSize())
+      -= jacobian2;
+  x_cursor += param2->GlobalSize();
+  delta_cursor += param2->LocalSize();
+
+  Matrix jacobian3(param3->GlobalSize(), param3->LocalSize());
+  EXPECT_TRUE(param3->ComputeJacobian(&x[x_cursor], jacobian3.data()));
+  jacobian.block(x_cursor, delta_cursor,
+                 param3->GlobalSize(),
+                 param3->LocalSize())
+      -= jacobian3;
+  x_cursor += param3->GlobalSize();
+  delta_cursor += param3->LocalSize();
+
+  Matrix jacobian4(param4->GlobalSize(), param4->LocalSize());
+  EXPECT_TRUE(param4->ComputeJacobian(&x[x_cursor], jacobian4.data()));
+  jacobian.block(x_cursor, delta_cursor,
+                 param4->GlobalSize(),
+                 param4->LocalSize())
+      -= jacobian4;
+  x_cursor += param4->GlobalSize();
+  delta_cursor += param4->LocalSize();
+
+  EXPECT_NEAR(jacobian.norm(), 0.0, std::numeric_limits<double>::epsilon());
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/loss_function.cc b/internal/ceres/loss_function.cc
new file mode 100644
index 0000000..bf41b9e
--- /dev/null
+++ b/internal/ceres/loss_function.cc
@@ -0,0 +1,177 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Purpose: See .h file.
+
+#include "ceres/loss_function.h"
+
+#include <algorithm>
+#include <cmath>
+#include <cstddef>
+#include <limits>
+
+namespace ceres {
+
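+// In each Evaluate() below, rho[0], rho[1] and rho[2] are set to rho(s),
+// rho'(s) and rho''(s) respectively, where s is the squared norm of the
+// residuals. A minimal usage sketch (problem, cost_function and x are
+// assumed to exist elsewhere):
+//
+//   problem.AddResidualBlock(cost_function, new HuberLoss(1.0), x);
+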
+void TrivialLoss::Evaluate(double s, double rho[3]) const {
+  rho[0] = s;
+  rho[1] = 1.0;
+  rho[2] = 0.0;
+}
+
+void HuberLoss::Evaluate(double s, double rho[3]) const {
+  if (s > b_) {
+    // Outlier region.
+    // 'r' is always positive.
+    const double r = sqrt(s);
+    rho[0] = 2.0 * a_ * r - b_;
+    rho[1] = std::max(std::numeric_limits<double>::min(), a_ / r);
+    rho[2] = - rho[1] / (2.0 * s);
+  } else {
+    // Inlier region.
+    rho[0] = s;
+    rho[1] = 1.0;
+    rho[2] = 0.0;
+  }
+}
+
+void SoftLOneLoss::Evaluate(double s, double rho[3]) const {
+  const double sum = 1.0 + s * c_;
+  const double tmp = sqrt(sum);
+  // 'sum' and 'tmp' are always positive, assuming that 's' is.
+  rho[0] = 2.0 * b_ * (tmp - 1.0);
+  rho[1] = std::max(std::numeric_limits<double>::min(), 1.0 / tmp);
+  rho[2] = - (c_ * rho[1]) / (2.0 * sum);
+}
+
+void CauchyLoss::Evaluate(double s, double rho[3]) const {
+  const double sum = 1.0 + s * c_;
+  const double inv = 1.0 / sum;
+  // 'sum' and 'inv' are always positive, assuming that 's' is.
+  rho[0] = b_ * log(sum);
+  rho[1] = std::max(std::numeric_limits<double>::min(), inv);
+  rho[2] = - c_ * (inv * inv);
+}
+
+void ArctanLoss::Evaluate(double s, double rho[3]) const {
+  const double sum = 1 + s * s * b_;
+  const double inv = 1 / sum;
+  // 'sum' and 'inv' are always positive.
+  rho[0] = a_ * atan2(s, a_);
+  rho[1] = std::max(std::numeric_limits<double>::min(), inv);
+  rho[2] = -2.0 * s * b_ * (inv * inv);
+}
+
+TolerantLoss::TolerantLoss(double a, double b)
+    : a_(a),
+      b_(b),
+      c_(b * log(1.0 + exp(-a / b))) {
+  CHECK_GE(a, 0.0);
+  CHECK_GT(b, 0.0);
+}
+
+void TolerantLoss::Evaluate(double s, double rho[3]) const {
+  const double x = (s - a_) / b_;
+  // The basic equation is rho[0] = b ln(1 + e^x).  However, if e^x is too
+  // large, it will overflow.  Since numerically 1 + e^x == e^x when x is
+  // greater than about ln(2^53) for doubles, beyond this threshold we
+  // substitute x for ln(1 + e^x) as a numerically equivalent approximation.
+  static const double kLog2Pow53 = 36.7;  // ln(2^53).
+  if (x > kLog2Pow53) {
+    rho[0] = s - a_ - c_;
+    rho[1] = 1.0;
+    rho[2] = 0.0;
+  } else {
+    const double e_x = exp(x);
+    rho[0] = b_ * log(1.0 + e_x) - c_;
+    rho[1] = std::max(std::numeric_limits<double>::min(), e_x / (1.0 + e_x));
+    rho[2] = 0.5 / (b_ * (1.0 + cosh(x)));
+  }
+}
+
+void TukeyLoss::Evaluate(double s, double* rho) const {
+  if (s <= a_squared_) {
+    // Inlier region.
+    const double value = 1.0 - s / a_squared_;
+    const double value_sq = value * value;
+    rho[0] = a_squared_ / 6.0 * (1.0 - value_sq * value);
+    rho[1] = 0.5 * value_sq;
+    rho[2] = -1.0 / a_squared_ * value;
+  } else {
+    // Outlier region.
+    rho[0] = a_squared_ / 6.0;
+    rho[1] = 0.0;
+    rho[2] = 0.0;
+  }
+}
+
+ComposedLoss::ComposedLoss(const LossFunction* f, Ownership ownership_f,
+                           const LossFunction* g, Ownership ownership_g)
+    : f_(f),
+      g_(g),
+      ownership_f_(ownership_f),
+      ownership_g_(ownership_g) {
+  CHECK(f_ != nullptr);
+  CHECK(g_ != nullptr);
+}
+
+ComposedLoss::~ComposedLoss() {
+  if (ownership_f_ == DO_NOT_TAKE_OWNERSHIP) {
+    f_.release();
+  }
+  if (ownership_g_ == DO_NOT_TAKE_OWNERSHIP) {
+    g_.release();
+  }
+}
+
+void ComposedLoss::Evaluate(double s, double rho[3]) const {
+  double rho_f[3], rho_g[3];
+  g_->Evaluate(s, rho_g);
+  f_->Evaluate(rho_g[0], rho_f);
+  rho[0] = rho_f[0];
+  // f'(g(s)) * g'(s).
+  rho[1] = rho_f[1] * rho_g[1];
+  // f''(g(s)) * g'(s) * g'(s) + f'(g(s)) * g''(s).
+  rho[2] = rho_f[2] * rho_g[1] * rho_g[1] + rho_f[1] * rho_g[2];
+}
+
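+// If rho_ is NULL, ScaledLoss reduces to a pure rescaling of the squared
+// norm, i.e. rho(s) = a * s; otherwise the wrapped loss and its first two
+// derivatives are simply scaled by a.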
+void ScaledLoss::Evaluate(double s, double rho[3]) const {
+  if (rho_.get() == NULL) {
+    rho[0] = a_ * s;
+    rho[1] = a_;
+    rho[2] = 0.0;
+  } else {
+    rho_->Evaluate(s, rho);
+    rho[0] *= a_;
+    rho[1] *= a_;
+    rho[2] *= a_;
+  }
+}
+
+}  // namespace ceres
diff --git a/internal/ceres/loss_function_test.cc b/internal/ceres/loss_function_test.cc
new file mode 100644
index 0000000..406ace7
--- /dev/null
+++ b/internal/ceres/loss_function_test.cc
@@ -0,0 +1,252 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/loss_function.h"
+
+#include <cstddef>
+
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+namespace {
+
+// Helper function for testing a LossFunction callback.
+//
+// Compares the values of rho'(s) and rho''(s) computed by the
+// callback with estimates obtained by symmetric finite differencing
+// of rho(s).
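+//
+// Central differences used below:
+//
+//   rho'(s)  ~ (rho(s + h) - rho(s - h)) / (2 h)
+//   rho''(s) ~ (rho(s + h) - 2 rho(s) + rho(s - h)) / h^2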
+void AssertLossFunctionIsValid(const LossFunction& loss, double s) {
+  CHECK_GT(s, 0);
+
+  // Evaluate rho(s), rho'(s) and rho''(s).
+  double rho[3];
+  loss.Evaluate(s, rho);
+
+  // Use symmetric finite differencing to estimate rho'(s) and
+  // rho''(s).
+  const double kH = 1e-4;
+  // Values at s + kH.
+  double fwd[3];
+  // Values at s - kH.
+  double bwd[3];
+  loss.Evaluate(s + kH, fwd);
+  loss.Evaluate(s - kH, bwd);
+
+  // First derivative.
+  const double fd_1 = (fwd[0] - bwd[0]) / (2 * kH);
+  ASSERT_NEAR(fd_1, rho[1], 1e-6);
+
+  // Second derivative.
+  const double fd_2 = (fwd[0] - 2*rho[0] + bwd[0]) / (kH * kH);
+  ASSERT_NEAR(fd_2, rho[2], 1e-6);
+}
+}  // namespace
+
+// Try two values of the scaling, a = 0.7 and 1.3 (where scaling makes
+// sense), and two values of the squared norm, s = 0.357 and 1.792.
+//
+// Note that for the Huber loss the test exercises both code paths
+// (i.e. both small and large values of s).
+
+TEST(LossFunction, TrivialLoss) {
+  AssertLossFunctionIsValid(TrivialLoss(), 0.357);
+  AssertLossFunctionIsValid(TrivialLoss(), 1.792);
+}
+
+TEST(LossFunction, HuberLoss) {
+  AssertLossFunctionIsValid(HuberLoss(0.7), 0.357);
+  AssertLossFunctionIsValid(HuberLoss(0.7), 1.792);
+  AssertLossFunctionIsValid(HuberLoss(1.3), 0.357);
+  AssertLossFunctionIsValid(HuberLoss(1.3), 1.792);
+}
+
+TEST(LossFunction, SoftLOneLoss) {
+  AssertLossFunctionIsValid(SoftLOneLoss(0.7), 0.357);
+  AssertLossFunctionIsValid(SoftLOneLoss(0.7), 1.792);
+  AssertLossFunctionIsValid(SoftLOneLoss(1.3), 0.357);
+  AssertLossFunctionIsValid(SoftLOneLoss(1.3), 1.792);
+}
+
+TEST(LossFunction, CauchyLoss) {
+  AssertLossFunctionIsValid(CauchyLoss(0.7), 0.357);
+  AssertLossFunctionIsValid(CauchyLoss(0.7), 1.792);
+  AssertLossFunctionIsValid(CauchyLoss(1.3), 0.357);
+  AssertLossFunctionIsValid(CauchyLoss(1.3), 1.792);
+}
+
+TEST(LossFunction, ArctanLoss) {
+  AssertLossFunctionIsValid(ArctanLoss(0.7), 0.357);
+  AssertLossFunctionIsValid(ArctanLoss(0.7), 1.792);
+  AssertLossFunctionIsValid(ArctanLoss(1.3), 0.357);
+  AssertLossFunctionIsValid(ArctanLoss(1.3), 1.792);
+}
+
+TEST(LossFunction, TolerantLoss) {
+  AssertLossFunctionIsValid(TolerantLoss(0.7, 0.4), 0.357);
+  AssertLossFunctionIsValid(TolerantLoss(0.7, 0.4), 1.792);
+  AssertLossFunctionIsValid(TolerantLoss(0.7, 0.4), 55.5);
+  AssertLossFunctionIsValid(TolerantLoss(1.3, 0.1), 0.357);
+  AssertLossFunctionIsValid(TolerantLoss(1.3, 0.1), 1.792);
+  AssertLossFunctionIsValid(TolerantLoss(1.3, 0.1), 55.5);
+  // Check the value at zero is actually zero.
+  double rho[3];
+  TolerantLoss(0.7, 0.4).Evaluate(0.0, rho);
+  ASSERT_NEAR(rho[0], 0.0, 1e-6);
+  // Check that loss before and after the approximation threshold are good.
+  // A threshold of 36.7 is used by the implementation.
+  AssertLossFunctionIsValid(TolerantLoss(20.0, 1.0), 20.0 + 36.6);
+  AssertLossFunctionIsValid(TolerantLoss(20.0, 1.0), 20.0 + 36.7);
+  AssertLossFunctionIsValid(TolerantLoss(20.0, 1.0), 20.0 + 36.8);
+  AssertLossFunctionIsValid(TolerantLoss(20.0, 1.0), 20.0 + 1000.0);
+}
+
+TEST(LossFunction, TukeyLoss) {
+  AssertLossFunctionIsValid(TukeyLoss(0.7), 0.357);
+  AssertLossFunctionIsValid(TukeyLoss(0.7), 1.792);
+  AssertLossFunctionIsValid(TukeyLoss(1.3), 0.357);
+  AssertLossFunctionIsValid(TukeyLoss(1.3), 1.792);
+}
+
+TEST(LossFunction, ComposedLoss) {
+  {
+    HuberLoss f(0.7);
+    CauchyLoss g(1.3);
+    ComposedLoss c(&f, DO_NOT_TAKE_OWNERSHIP, &g, DO_NOT_TAKE_OWNERSHIP);
+    AssertLossFunctionIsValid(c, 0.357);
+    AssertLossFunctionIsValid(c, 1.792);
+  }
+  {
+    CauchyLoss f(0.7);
+    HuberLoss g(1.3);
+    ComposedLoss c(&f, DO_NOT_TAKE_OWNERSHIP, &g, DO_NOT_TAKE_OWNERSHIP);
+    AssertLossFunctionIsValid(c, 0.357);
+    AssertLossFunctionIsValid(c, 1.792);
+  }
+}
+
+TEST(LossFunction, ScaledLoss) {
+  // Wrap a few loss functions with a few scale factors. Construction cannot
+  // be combined with the call to AssertLossFunctionIsValid() because Apple's
+  // GCC is unable to eliminate the copy of ScaledLoss, which is not copyable.
+  {
+    ScaledLoss scaled_loss(NULL, 6, TAKE_OWNERSHIP);
+    AssertLossFunctionIsValid(scaled_loss, 0.323);
+  }
+  {
+    ScaledLoss scaled_loss(new TrivialLoss(), 10, TAKE_OWNERSHIP);
+    AssertLossFunctionIsValid(scaled_loss, 0.357);
+  }
+  {
+    ScaledLoss scaled_loss(new HuberLoss(0.7), 0.1, TAKE_OWNERSHIP);
+    AssertLossFunctionIsValid(scaled_loss, 1.792);
+  }
+  {
+    ScaledLoss scaled_loss(new SoftLOneLoss(1.3), 0.1, TAKE_OWNERSHIP);
+    AssertLossFunctionIsValid(scaled_loss, 1.792);
+  }
+  {
+    ScaledLoss scaled_loss(new CauchyLoss(1.3), 10, TAKE_OWNERSHIP);
+    AssertLossFunctionIsValid(scaled_loss, 1.792);
+  }
+  {
+    ScaledLoss scaled_loss(new ArctanLoss(1.3), 10, TAKE_OWNERSHIP);
+    AssertLossFunctionIsValid(scaled_loss, 1.792);
+  }
+  {
+    ScaledLoss scaled_loss(
+        new TolerantLoss(1.3, 0.1), 10, TAKE_OWNERSHIP);
+    AssertLossFunctionIsValid(scaled_loss, 1.792);
+  }
+  {
+    ScaledLoss scaled_loss(
+        new ComposedLoss(
+            new HuberLoss(0.8), TAKE_OWNERSHIP,
+            new TolerantLoss(1.3, 0.5), TAKE_OWNERSHIP), 10, TAKE_OWNERSHIP);
+    AssertLossFunctionIsValid(scaled_loss, 1.792);
+  }
+}
+
+TEST(LossFunction, LossFunctionWrapper) {
+  // Initialization
+  HuberLoss loss_function1(1.0);
+  LossFunctionWrapper loss_function_wrapper(new HuberLoss(1.0),
+                                            TAKE_OWNERSHIP);
+
+  double s = 0.862;
+  double rho_gold[3];
+  double rho[3];
+  loss_function1.Evaluate(s, rho_gold);
+  loss_function_wrapper.Evaluate(s, rho);
+  for (int i = 0; i < 3; ++i) {
+    EXPECT_NEAR(rho[i], rho_gold[i], 1e-12);
+  }
+
+  // Resetting
+  HuberLoss loss_function2(0.5);
+  loss_function_wrapper.Reset(new HuberLoss(0.5), TAKE_OWNERSHIP);
+  loss_function_wrapper.Evaluate(s, rho);
+  loss_function2.Evaluate(s, rho_gold);
+  for (int i = 0; i < 3; ++i) {
+    EXPECT_NEAR(rho[i], rho_gold[i], 1e-12);
+  }
+
+  // Not taking ownership.
+  HuberLoss loss_function3(0.3);
+  loss_function_wrapper.Reset(&loss_function3, DO_NOT_TAKE_OWNERSHIP);
+  loss_function_wrapper.Evaluate(s, rho);
+  loss_function3.Evaluate(s, rho_gold);
+  for (int i = 0; i < 3; ++i) {
+    EXPECT_NEAR(rho[i], rho_gold[i], 1e-12);
+  }
+
+  // Set to NULL
+  TrivialLoss loss_function4;
+  loss_function_wrapper.Reset(NULL, TAKE_OWNERSHIP);
+  loss_function_wrapper.Evaluate(s, rho);
+  loss_function4.Evaluate(s, rho_gold);
+  for (int i = 0; i < 3; ++i) {
+    EXPECT_NEAR(rho[i], rho_gold[i], 1e-12);
+  }
+
+  // Set to NULL, not taking ownership
+  loss_function_wrapper.Reset(NULL, DO_NOT_TAKE_OWNERSHIP);
+  loss_function_wrapper.Evaluate(s, rho);
+  loss_function4.Evaluate(s, rho_gold);
+  for (int i = 0; i < 3; ++i) {
+    EXPECT_NEAR(rho[i], rho_gold[i], 1e-12);
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/low_rank_inverse_hessian.cc b/internal/ceres/low_rank_inverse_hessian.cc
new file mode 100644
index 0000000..f3953c4
--- /dev/null
+++ b/internal/ceres/low_rank_inverse_hessian.cc
@@ -0,0 +1,186 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <list>
+
+#include "ceres/internal/eigen.h"
+#include "ceres/low_rank_inverse_hessian.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+using std::list;
+
+// The (L)BFGS algorithm explicitly requires that the secant equation:
+//
+//   B_{k+1} * s_k = y_k
+//
+// is satisfied at each iteration, where B_{k+1} is the approximated
+// Hessian at the k+1-th iteration, s_k = (x_{k+1} - x_{k}) and
+// y_k = (grad_{k+1} - grad_{k}). As the approximated Hessian must be
+// positive definite, this is equivalent to the condition:
+//
+//   s_k^T * y_k > 0     [s_k^T * B_{k+1} * s_k = s_k^T * y_k > 0]
+//
+// This condition would always be satisfied if the function were strictly
+// convex; alternatively, it is always satisfied provided that a Wolfe line
+// search is used (even if the function is not strictly convex).  See [1]
+// (p138) for a proof.
+//
+// Although Ceres will always use a Wolfe line search when using (L)BFGS,
+// practical implementation considerations mean that the line search
+// may return a point that satisfies only the Armijo condition, and thus
+// could violate the Secant equation.  As such, we will only use a step
+// to update the Hessian approximation if:
+//
+//   s_k^T * y_k > tolerance
+//
+// It is important that tolerance is very small (and >=0), as otherwise we
+// might skip the update too often and fail to capture important curvature
+// information in the Hessian.  For example going from 1e-10 -> 1e-14 improves
+// the NIST benchmark score from 43/54 to 53/54.
+//
+// [1] Nocedal J., Wright S., Numerical Optimization, 2nd Ed. Springer, 1999.
+//
+// TODO(alexs.mac): Consider using Damped BFGS update instead of
+// skipping update.
+const double kLBFGSSecantConditionHessianUpdateTolerance = 1e-14;
+
+LowRankInverseHessian::LowRankInverseHessian(
+    int num_parameters,
+    int max_num_corrections,
+    bool use_approximate_eigenvalue_scaling)
+    : num_parameters_(num_parameters),
+      max_num_corrections_(max_num_corrections),
+      use_approximate_eigenvalue_scaling_(use_approximate_eigenvalue_scaling),
+      approximate_eigenvalue_scale_(1.0),
+      delta_x_history_(num_parameters, max_num_corrections),
+      delta_gradient_history_(num_parameters, max_num_corrections),
+      delta_x_dot_delta_gradient_(max_num_corrections) {
+}
+
+bool LowRankInverseHessian::Update(const Vector& delta_x,
+                                   const Vector& delta_gradient) {
+  const double delta_x_dot_delta_gradient = delta_x.dot(delta_gradient);
+  if (delta_x_dot_delta_gradient <=
+      kLBFGSSecantConditionHessianUpdateTolerance) {
+    VLOG(2) << "Skipping L-BFGS Update, delta_x_dot_delta_gradient too "
+            << "small: " << delta_x_dot_delta_gradient << ", tolerance: "
+            << kLBFGSSecantConditionHessianUpdateTolerance
+            << " (Secant condition).";
+    return false;
+  }
+
+  int next = indices_.size();
+  // Once the size of the list reaches max_num_corrections_, simulate
+  // a circular buffer by removing the first element of the list and
+  // making it the next position where the LBFGS history is stored.
+  if (next == max_num_corrections_) {
+    next = indices_.front();
+    indices_.pop_front();
+  }
+
+  indices_.push_back(next);
+  delta_x_history_.col(next) = delta_x;
+  delta_gradient_history_.col(next) = delta_gradient;
+  delta_x_dot_delta_gradient_(next) = delta_x_dot_delta_gradient;
+  approximate_eigenvalue_scale_ =
+      delta_x_dot_delta_gradient / delta_gradient.squaredNorm();
+  return true;
+}
+
+void LowRankInverseHessian::RightMultiply(const double* x_ptr,
+                                          double* y_ptr) const {
+  ConstVectorRef gradient(x_ptr, num_parameters_);
+  VectorRef search_direction(y_ptr, num_parameters_);
+
+  search_direction = gradient;
+
+  const int num_corrections = indices_.size();
+  Vector alpha(num_corrections);
+
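+  // First loop of the L-BFGS two-loop recursion: walk the correction history
+  // from newest to oldest, computing the alpha coefficients and subtracting
+  // alpha_i * delta_gradient_i from the search direction.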
+  for (list<int>::const_reverse_iterator it = indices_.rbegin();
+       it != indices_.rend();
+       ++it) {
+    const double alpha_i = delta_x_history_.col(*it).dot(search_direction) /
+        delta_x_dot_delta_gradient_(*it);
+    search_direction -= alpha_i * delta_gradient_history_.col(*it);
+    alpha(*it) = alpha_i;
+  }
+
+  if (use_approximate_eigenvalue_scaling_) {
+    // Rescale the initial inverse Hessian approximation (H_0) to be iteratively
+    // updated so that it is of similar 'size' to the true inverse Hessian along
+    // the most recent search direction.  As shown in [1]:
+    //
+    //   \gamma_k = (delta_gradient_{k-1}' * delta_x_{k-1}) /
+    //              (delta_gradient_{k-1}' * delta_gradient_{k-1})
+    //
+    // Satisfies:
+    //
+    //   (1 / \lambda_m) <= \gamma_k <= (1 / \lambda_1)
+    //
+    // Where \lambda_1 & \lambda_m are the smallest and largest eigenvalues of
+    // the true Hessian (not the inverse) along the most recent search direction
+    // respectively.  Thus \gamma is an approximate eigenvalue of the true
+    // inverse Hessian, and choosing: H_0 = I * \gamma will yield a starting
+    // point that has a similar scale to the true inverse Hessian.  This
+    // technique is widely reported to often improve convergence; however, this
+    // is not universally true, particularly if there are errors in the initial
+    // Jacobians, or if there are significant differences in the sensitivity
+    // of the problem to the parameters (i.e. the range of the magnitudes of
+    // the components of the gradient is large).
+    //
+    // The origin of this rescaling trick is somewhat unclear; the earliest
+    // reference appears to be Oren [1], but it is widely discussed without
+    // specific attribution in various texts, including [2] (p143/178).
+    //
+    // [1] Oren S.S., Self-scaling variable metric (SSVM) algorithms Part II:
+    //     Implementation and experiments, Management Science,
+    //     20(5), 863-874, 1974.
+    // [2] Nocedal J., Wright S., Numerical Optimization, Springer, 1999.
+    search_direction *= approximate_eigenvalue_scale_;
+
+    VLOG(4) << "Applying approximate_eigenvalue_scale: "
+            << approximate_eigenvalue_scale_ << " to initial inverse Hessian "
+            << "approximation.";
+  }
+
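+  // Second loop of the two-loop recursion: walk the history from oldest to
+  // newest, adding back (alpha_i - beta_i) * delta_x_i.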
+  for (const int i : indices_) {
+    const double beta = delta_gradient_history_.col(i).dot(search_direction) /
+        delta_x_dot_delta_gradient_(i);
+    search_direction += delta_x_history_.col(i) * (alpha(i) - beta);
+  }
+}
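+
+// Illustrative usage of LowRankInverseHessian (a sketch with assumed sizes,
+// not part of the library): a line search minimizer feeds parameter and
+// gradient differences into Update(), then applies the implicit inverse
+// Hessian to the current gradient to obtain a search direction.
+//
+//   LowRankInverseHessian lbfgs_inverse_hessian(/*num_parameters=*/2,
+//                                               /*max_num_corrections=*/5,
+//                                               true);
+//   Vector delta_x(2), delta_gradient(2), gradient(2), direction(2);
+//   // ... fill delta_x, delta_gradient and gradient ...
+//   if (lbfgs_inverse_hessian.Update(delta_x, delta_gradient)) {
+//     lbfgs_inverse_hessian.RightMultiply(gradient.data(), direction.data());
+//     // 'direction' now approximates H^{-1} * gradient; the minimizer steps
+//     // along its negation.
+//   }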
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/low_rank_inverse_hessian.h b/internal/ceres/low_rank_inverse_hessian.h
new file mode 100644
index 0000000..2c768c2
--- /dev/null
+++ b/internal/ceres/low_rank_inverse_hessian.h
@@ -0,0 +1,108 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Limited memory positive definite approximation to the inverse
+// Hessian, using the LBFGS algorithm
+
+#ifndef CERES_INTERNAL_LOW_RANK_INVERSE_HESSIAN_H_
+#define CERES_INTERNAL_LOW_RANK_INVERSE_HESSIAN_H_
+
+#include <list>
+
+#include "ceres/internal/eigen.h"
+#include "ceres/linear_operator.h"
+
+namespace ceres {
+namespace internal {
+
+// LowRankInverseHessian is a positive definite approximation to the
+// Hessian using the limited memory variant of the
+// Broyden-Fletcher-Goldfarb-Shanno (BFGS) secant formula for
+// approximating the Hessian.
+//
+// Other update rules like the Davidon-Fletcher-Powell (DFP) are
+// possible, but the BFGS rule is considered the best performing one.
+//
+// The limited memory variant was developed by Nocedal and further
+// enhanced with a scaling rule by Byrd, Nocedal and Schnabel.
+//
+// Nocedal, J. (1980). "Updating Quasi-Newton Matrices with Limited
+// Storage". Mathematics of Computation 35 (151): 773–782.
+//
+// Byrd, R. H.; Nocedal, J.; Schnabel, R. B. (1994).
+// "Representations of Quasi-Newton Matrices and their use in
+// Limited Memory Methods". Mathematical Programming 63 (4): 129-156.
+class LowRankInverseHessian : public LinearOperator {
+ public:
+  // num_parameters is the row/column size of the Hessian.
+  // max_num_corrections is the rank of the Hessian approximation.
+  // use_approximate_eigenvalue_scaling controls whether the initial
+  // inverse Hessian used during Right/LeftMultiply() is scaled by
+  // the approximate eigenvalue of the true inverse Hessian at the
+  // current operating point.
+  // The approximation uses:
+  // 2 * max_num_corrections * num_parameters + max_num_corrections
+  // doubles.
+  LowRankInverseHessian(int num_parameters,
+                        int max_num_corrections,
+                        bool use_approximate_eigenvalue_scaling);
+  virtual ~LowRankInverseHessian() {}
+
+  // Update the low rank approximation. delta_x is the change in the
+  // domain of the Hessian, and delta_gradient is the change in the
+  // gradient.  The update copies the delta_x and delta_gradient
+  // vectors, and gets rid of the oldest delta_x and delta_gradient
+  // vectors if the number of corrections is already equal to
+  // max_num_corrections.
+  bool Update(const Vector& delta_x, const Vector& delta_gradient);
+
+  // LinearOperator interface
+  virtual void RightMultiply(const double* x, double* y) const;
+  virtual void LeftMultiply(const double* x, double* y) const {
+    RightMultiply(x, y);
+  }
+  virtual int num_rows() const { return num_parameters_; }
+  virtual int num_cols() const { return num_parameters_; }
+
+ private:
+  const int num_parameters_;
+  const int max_num_corrections_;
+  const bool use_approximate_eigenvalue_scaling_;
+  double approximate_eigenvalue_scale_;
+  ColMajorMatrix delta_x_history_;
+  ColMajorMatrix delta_gradient_history_;
+  Vector delta_x_dot_delta_gradient_;
+  std::list<int> indices_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_LOW_RANK_INVERSE_HESSIAN_H_
diff --git a/internal/ceres/map_util.h b/internal/ceres/map_util.h
new file mode 100644
index 0000000..f55aee3
--- /dev/null
+++ b/internal/ceres/map_util.h
@@ -0,0 +1,130 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// Originally by Anton Carver
+
+#ifndef CERES_INTERNAL_MAP_UTIL_H_
+#define CERES_INTERNAL_MAP_UTIL_H_
+
+#include <utility>
+#include "ceres/internal/port.h"
+#include "glog/logging.h"
+
+namespace ceres {
+
+// Perform a lookup in a map or hash_map, assuming that the key exists.
+// Crash if it does not.
+//
+// This is intended as a replacement for operator[] as an rvalue (for reading)
+// when the key is guaranteed to exist.
+//
+// operator[] is discouraged for several reasons:
+//  * It has a side-effect of inserting missing keys
+//  * It is not thread-safe (even when it is not inserting, it can still
+//      choose to resize the underlying storage)
+//  * It invalidates iterators (when it chooses to resize)
+//  * It default constructs a value object even if it doesn't need to
+//
+// This version assumes the key is printable, and includes it in the fatal log
+// message.
+template <class Collection>
+const typename Collection::value_type::second_type&
+FindOrDie(const Collection& collection,
+          const typename Collection::value_type::first_type& key) {
+  typename Collection::const_iterator it = collection.find(key);
+  CHECK(it != collection.end()) << "Map key not found: " << key;
+  return it->second;
+}
+
+// Perform a lookup in a map or hash_map.
+// If the key is present in the map then the value associated with that
+// key is returned, otherwise the value passed as a default is returned.
+template <class Collection>
+const typename Collection::value_type::second_type
+FindWithDefault(const Collection& collection,
+                const typename Collection::value_type::first_type& key,
+                const typename Collection::value_type::second_type& value) {
+  typename Collection::const_iterator it = collection.find(key);
+  if (it == collection.end()) {
+    return value;
+  }
+  return it->second;
+}
+
+// Insert a new key and value into a map or hash_map.
+// If the key is not present in the map the key and value are
+// inserted, otherwise nothing happens. True indicates that an insert
+// took place, false indicates the key was already present.
+template <class Collection>
+bool InsertIfNotPresent(
+    Collection * const collection,
+    const typename Collection::value_type::first_type& key,
+    const typename Collection::value_type::second_type& value) {
+  std::pair<typename Collection::iterator, bool> ret =
+      collection->insert(typename Collection::value_type(key, value));
+  return ret.second;
+}
+
+// Perform a lookup in a map or hash_map.
+// Returns a pointer to the value if the key is present, and NULL otherwise.
+// The returned pointer is not const and can be used to change the stored
+// value.
+template <class Collection>
+typename Collection::value_type::second_type*
+FindOrNull(Collection& collection,  // NOLINT
+           const typename Collection::value_type::first_type& key) {
+  typename Collection::iterator it = collection.find(key);
+  if (it == collection.end()) {
+    return 0;
+  }
+  return &it->second;
+}
+
+// Test to see if a set, map, hash_set or hash_map contains a particular key.
+// Returns true if the key is in the collection.
+template <class Collection, class Key>
+bool ContainsKey(const Collection& collection, const Key& key) {
+  typename Collection::const_iterator it = collection.find(key);
+  return it != collection.end();
+}
+
+// Inserts a new key/value into a map or hash_map.
+// Dies if the key is already present.
+template<class Collection>
+void InsertOrDie(Collection* const collection,
+                 const typename Collection::value_type::first_type& key,
+                 const typename Collection::value_type::second_type& data) {
+  typedef typename Collection::value_type value_type;
+  CHECK(collection->insert(value_type(key, data)).second)
+    << "duplicate key: " << key;
+}
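+
+// Illustrative usage of the helpers above (a sketch, not part of the header):
+//
+//   std::map<std::string, int> counts;
+//   InsertOrDie(&counts, "a", 1);                   // Dies if "a" is present.
+//   InsertIfNotPresent(&counts, "a", 2);            // Returns false; no-op.
+//   const int a = FindOrDie(counts, "a");           // a == 1.
+//   const int b = FindWithDefault(counts, "b", 0);  // b == 0.
+//   if (int* value = FindOrNull(counts, "a")) {     // Mutable lookup.
+//     ++*value;
+//   }
+//   const bool has_a = ContainsKey(counts, "a");    // true.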
+
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_MAP_UTIL_H_
diff --git a/internal/ceres/miniglog/glog/logging.cc b/internal/ceres/miniglog/glog/logging.cc
new file mode 100644
index 0000000..372ecb0
--- /dev/null
+++ b/internal/ceres/miniglog/glog/logging.cc
@@ -0,0 +1,39 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include "glog/logging.h"
+
+namespace google {
+
+// This is the set of log sinks. This must be in a separate library to ensure
+// that there is only one instance of this across the entire program.
+std::set<google::LogSink *> log_sinks_global;
+
+}  // namespace google
diff --git a/internal/ceres/miniglog/glog/logging.h b/internal/ceres/miniglog/glog/logging.h
new file mode 100644
index 0000000..0fdf382
--- /dev/null
+++ b/internal/ceres/miniglog/glog/logging.h
@@ -0,0 +1,425 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: settinger@google.com (Scott Ettinger)
+//         mierle@gmail.com (Keir Mierle)
+//
+// Simplified Glog style logging with Android support. Supported macros,
+// grouped one severity level per line in order of increasing severity:
+//
+//   VLOG(2), VLOG(N)
+//   VLOG(1),
+//   LOG(INFO), VLOG(0), LG
+//   LOG(WARNING),
+//   LOG(ERROR),
+//   LOG(FATAL),
+//
+// With VLOG(n), the output is directed to one of the 5 Android log levels:
+//
+//   2 - Verbose
+//   1 - Debug
+//   0 - Info
+//  -1 - Warning
+//  -2 - Error
+//  -3 - Fatal
+//
+// Any logging of level 2 and above is directed to the Verbose level. All
+// Android log output is tagged with the string "native".
+//
+// If the symbol ANDROID is not defined, all output goes to std::cerr.
+// This allows code to be built on a different system for debug.
+//
+// Portions of this code are taken from the GLOG package.  This code is only a
+// small subset of the GLOG functionality. Notable differences from GLOG
+// behavior include lack of support for displaying unprintable characters and
+// lack of stack trace information upon failure of the CHECK macros.  On
+// non-Android systems, log output goes to std::cerr and is not written to a
+// file.
+//
+// CHECK macros are defined to test for conditions within code.  Any CHECK that
+// fails will log the failure and terminate the application.
+// e.g. CHECK_GE(3, 2) will pass while CHECK_GE(3, 4) will fail after logging
+//      "Check failed 3 >= 4".
+//
+// The following CHECK macros are defined:
+//
+//   CHECK(condition)        - fails if condition is false and logs condition.
+//   CHECK_NOTNULL(variable) - fails if the variable is NULL.
+//
+// The following binary check macros are also defined :
+//
+//   Macro                     Operator equivalent
+//   --------------------      -------------------
+//   CHECK_EQ(val1, val2)      val1 == val2
+//   CHECK_NE(val1, val2)      val1 != val2
+//   CHECK_GT(val1, val2)      val1 > val2
+//   CHECK_GE(val1, val2)      val1 >= val2
+//   CHECK_LT(val1, val2)      val1 < val2
+//   CHECK_LE(val1, val2)      val1 <= val2
+//
+// Debug only versions of all of the check macros are also defined.  These
+// macros generate no code in a release build, but avoid unused variable
+// warnings / errors.
+//
+// To use the debug only versions, prepend a D to the normal check macros, e.g.
+// DCHECK_EQ(a, b).
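+//
+// Illustrative usage (a sketch, not part of this header):
+//
+//   void Solve(int max_iterations, double* parameters) {
+//     CHECK_NOTNULL(parameters);
+//     CHECK_GT(max_iterations, 0) << "At least one iteration is required.";
+//     VLOG(1) << "Starting solve with " << max_iterations << " iterations.";
+//     // ...
+//   }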
+
+#ifndef CERES_INTERNAL_MINIGLOG_GLOG_LOGGING_H_
+#define CERES_INTERNAL_MINIGLOG_GLOG_LOGGING_H_
+
+#ifdef ANDROID
+#  include <android/log.h>
+#endif  // ANDROID
+
+#include <algorithm>
+#include <ctime>
+#include <fstream>
+#include <iostream>
+#include <set>
+#include <sstream>
+#include <string>
+#include <vector>
+
+// For appropriate definition of CERES_EXPORT macro.
+#include "ceres/internal/port.h"
+#include "ceres/internal/disable_warnings.h"
+
+// Log severity level constants.
+const int FATAL   = -3;
+const int ERROR   = -2;
+const int WARNING = -1;
+const int INFO    =  0;
+
+// ------------------------- Glog compatibility ------------------------------
+
+namespace google {
+
+typedef int LogSeverity;
+const int INFO    = ::INFO;
+const int WARNING = ::WARNING;
+const int ERROR   = ::ERROR;
+const int FATAL   = ::FATAL;
+
+// Sink class used for integration with mock and test functions. If sinks are
+// added, all log output is also sent to each sink through the send function.
+// In this implementation, WaitTillSent() is called immediately after the send.
+// This implementation is not thread safe.
+class CERES_EXPORT LogSink {
+ public:
+  virtual ~LogSink() {}
+  virtual void send(LogSeverity severity,
+                    const char* full_filename,
+                    const char* base_filename,
+                    int line,
+                    const struct tm* tm_time,
+                    const char* message,
+                    size_t message_len) = 0;
+  virtual void WaitTillSent() = 0;
+};
+
+// Global set of log sinks. The actual object is defined in logging.cc.
+extern CERES_EXPORT std::set<LogSink *> log_sinks_global;
+
+inline void InitGoogleLogging(char *argv) {
+  // Do nothing; this is ignored.
+}
+
+// Note: the Log sink functions are not thread safe.
+inline void AddLogSink(LogSink *sink) {
+  // TODO(settinger): Add locks for thread safety.
+  log_sinks_global.insert(sink);
+}
+inline void RemoveLogSink(LogSink *sink) {
+  log_sinks_global.erase(sink);
+}
+
+}  // namespace google
+
+// ---------------------------- Logger Class --------------------------------
+
+// Class created for each use of the logging macros.
+// The logger acts as a stream and routes the final stream contents to the
+// Android logcat output at the proper filter level.  If ANDROID is not
+// defined, output is directed to std::cerr.  This class should not
+// be directly instantiated in code; rather, it should be invoked through the
+// use of the log macros LG, LOG, or VLOG.
+class CERES_EXPORT MessageLogger {
+ public:
+  MessageLogger(const char *file, int line, const char *tag, int severity)
+    : file_(file), line_(line), tag_(tag), severity_(severity) {
+    // Prepend the stream with the file and line number.
+    StripBasename(std::string(file), &filename_only_);
+    stream_ << filename_only_ << ":" << line << " ";
+  }
+
+  // Output the contents of the stream to the proper channel on destruction.
+  ~MessageLogger() {
+    stream_ << "\n";
+
+#ifdef ANDROID
+    static const int android_log_levels[] = {
+        ANDROID_LOG_FATAL,    // LOG(FATAL)
+        ANDROID_LOG_ERROR,    // LOG(ERROR)
+        ANDROID_LOG_WARN,     // LOG(WARNING)
+        ANDROID_LOG_INFO,     // LOG(INFO), LG, VLOG(0)
+        ANDROID_LOG_DEBUG,    // VLOG(1)
+        ANDROID_LOG_VERBOSE,  // VLOG(2) .. VLOG(N)
+    };
+
+    // Bound the logging level.
+    const int kMaxVerboseLevel = 2;
+    int android_level_index = std::min(std::max(FATAL, severity_),
+                                       kMaxVerboseLevel) - FATAL;
+    int android_log_level = android_log_levels[android_level_index];
+
+    // Output the log string to the Android log at the appropriate level.
+    __android_log_write(android_log_level, tag_.c_str(), stream_.str().c_str());
+
+    // Indicate termination if needed.
+    if (severity_ == FATAL) {
+      __android_log_write(ANDROID_LOG_FATAL,
+                          tag_.c_str(),
+                          "terminating.\n");
+    }
+#else
+    // If not building on Android, log all output to std::cerr.
+    std::cerr << stream_.str();
+#endif  // ANDROID
+
+    LogToSinks(severity_);
+    WaitForSinks();
+
+    // Android logging at level FATAL does not terminate execution, so abort()
+    // is still required to stop the program.
+    if (severity_ == FATAL) {
+      abort();
+    }
+  }
+
+  // Return the stream associated with the logger object.
+  std::stringstream &stream() { return stream_; }
+
+ private:
+  void LogToSinks(int severity) {
+    time_t rawtime;
+    time (&rawtime);
+
+    struct tm timeinfo;
+#if defined(WIN32) || defined(_WIN32) || defined(__WIN32__)
+    // On Windows, use secure localtime_s not localtime.
+    localtime_s(&timeinfo, &rawtime);
+#else
+    // On non-Windows systems, use threadsafe localtime_r not localtime.
+    localtime_r(&rawtime, &timeinfo);
+#endif
+
+    std::set<google::LogSink*>::iterator iter;
+    // Send the log message to all sinks.
+    for (iter = google::log_sinks_global.begin();
+         iter != google::log_sinks_global.end(); ++iter) {
+      (*iter)->send(severity, file_.c_str(), filename_only_.c_str(), line_,
+                    &timeinfo, stream_.str().c_str(), stream_.str().size());
+    }
+  }
+
+  void WaitForSinks() {
+    // TODO(settinger): Add locks for thread safety.
+    std::set<google::LogSink *>::iterator iter;
+
+    // Call WaitTillSent() for all sinks.
+    for (iter = google::log_sinks_global.begin();
+         iter != google::log_sinks_global.end(); ++iter) {
+      (*iter)->WaitTillSent();
+    }
+  }
+
+  void StripBasename(const std::string &full_path, std::string *filename) {
+    // TODO(settinger): Add support for OSs with different path separators.
+    const char kSeparator = '/';
+    size_t pos = full_path.rfind(kSeparator);
+    if (pos != std::string::npos) {
+      *filename = full_path.substr(pos + 1, std::string::npos);
+    } else {
+      *filename = full_path;
+    }
+  }
+
+  std::string file_;
+  std::string filename_only_;
+  int line_;
+  std::string tag_;
+  std::stringstream stream_;
+  int severity_;
+};
+
+// ---------------------- Logging Macro definitions --------------------------
+
+// This class is used to explicitly ignore values in the conditional
+// logging macros.  This avoids compiler warnings like "value computed
+// is not used" and "statement has no effect".
+class CERES_EXPORT LoggerVoidify {
+ public:
+  LoggerVoidify() { }
+  // This has to be an operator with a precedence lower than << but
+  // higher than ?:
+  void operator&(const std::ostream &s) { }
+};
+
+// Log only if condition is met.  Otherwise evaluates to void.
+#define LOG_IF(severity, condition) \
+    !(condition) ? (void) 0 : LoggerVoidify() & \
+      MessageLogger((char *)__FILE__, __LINE__, "native", severity).stream()
+
+// Log only if condition is NOT met.  Otherwise evaluates to void.
+#define LOG_IF_FALSE(severity, condition) LOG_IF(severity, !(condition))
+
+// LG is a convenient shortcut for LOG(INFO). Its use in new google3 code is
+// discouraged; the shortcut exists only for backward compatibility with
+// existing code.
+#ifdef MAX_LOG_LEVEL
+#  define LOG(n)  LOG_IF(n, n <= MAX_LOG_LEVEL)
+#  define VLOG(n) LOG_IF(n, n <= MAX_LOG_LEVEL)
+#  define LG      LOG_IF(INFO, INFO <= MAX_LOG_LEVEL)
+#  define VLOG_IF(n, condition) LOG_IF(n, (n <= MAX_LOG_LEVEL) && condition)
+#else
+#  define LOG(n)  MessageLogger((char *)__FILE__, __LINE__, "native", n).stream()    // NOLINT
+#  define VLOG(n) MessageLogger((char *)__FILE__, __LINE__, "native", n).stream()    // NOLINT
+#  define LG      MessageLogger((char *)__FILE__, __LINE__, "native", INFO).stream() // NOLINT
+#  define VLOG_IF(n, condition) LOG_IF(n, condition)
+#endif
+
+// Currently, VLOG is always on for levels below MAX_LOG_LEVEL.
+#ifndef MAX_LOG_LEVEL
+#  define VLOG_IS_ON(x) (1)
+#else
+#  define VLOG_IS_ON(x) (x <= MAX_LOG_LEVEL)
+#endif
+
+#ifndef NDEBUG
+#  define DLOG LOG
+#else
+#  define DLOG(severity) true ? (void) 0 : LoggerVoidify() & \
+      MessageLogger((char *)__FILE__, __LINE__, "native", severity).stream()
+#endif
+
+
+// Log a message and terminate.
+template<class T>
+void LogMessageFatal(const char *file, int line, const T &message) {
+  // Report the file and line passed in by the caller (e.g. CHECK_NOTNULL),
+  // not this header's own location.
+  MessageLogger(file, line, "native", FATAL).stream()
+      << message;
+}
+
+// ---------------------------- CHECK macros ---------------------------------
+
+// Check for a given boolean condition.
+#define CHECK(condition) LOG_IF_FALSE(FATAL, condition) \
+        << "Check failed: " #condition " "
+
+#ifndef NDEBUG
+// Debug only version of CHECK
+#  define DCHECK(condition) LOG_IF_FALSE(FATAL, condition) \
+          << "Check failed: " #condition " "
+#else
+// Optimized version - generates no code.
+#  define DCHECK(condition) if (false) LOG_IF_FALSE(FATAL, condition) \
+          << "Check failed: " #condition " "
+#endif  // NDEBUG
+
+// ------------------------- CHECK_OP macros ---------------------------------
+
+// Generic binary operator check macro. This should not be invoked directly;
+// instead, use the binary comparison macros defined below.
+#define CHECK_OP(val1, val2, op) LOG_IF_FALSE(FATAL, ((val1) op (val2))) \
+  << "Check failed: " #val1 " " #op " " #val2 " "
+
+// Check_op macro definitions
+#define CHECK_EQ(val1, val2) CHECK_OP(val1, val2, ==)
+#define CHECK_NE(val1, val2) CHECK_OP(val1, val2, !=)
+#define CHECK_LE(val1, val2) CHECK_OP(val1, val2, <=)
+#define CHECK_LT(val1, val2) CHECK_OP(val1, val2, <)
+#define CHECK_GE(val1, val2) CHECK_OP(val1, val2, >=)
+#define CHECK_GT(val1, val2) CHECK_OP(val1, val2, >)
+
+#ifndef NDEBUG
+// Debug only versions of CHECK_OP macros.
+#  define DCHECK_EQ(val1, val2) CHECK_OP(val1, val2, ==)
+#  define DCHECK_NE(val1, val2) CHECK_OP(val1, val2, !=)
+#  define DCHECK_LE(val1, val2) CHECK_OP(val1, val2, <=)
+#  define DCHECK_LT(val1, val2) CHECK_OP(val1, val2, <)
+#  define DCHECK_GE(val1, val2) CHECK_OP(val1, val2, >=)
+#  define DCHECK_GT(val1, val2) CHECK_OP(val1, val2, >)
+#else
+// These versions generate no code in optimized mode.
+#  define DCHECK_EQ(val1, val2) if (false) CHECK_OP(val1, val2, ==)
+#  define DCHECK_NE(val1, val2) if (false) CHECK_OP(val1, val2, !=)
+#  define DCHECK_LE(val1, val2) if (false) CHECK_OP(val1, val2, <=)
+#  define DCHECK_LT(val1, val2) if (false) CHECK_OP(val1, val2, <)
+#  define DCHECK_GE(val1, val2) if (false) CHECK_OP(val1, val2, >=)
+#  define DCHECK_GT(val1, val2) if (false) CHECK_OP(val1, val2, >)
+#endif  // NDEBUG
+
+// ---------------------------CHECK_NOTNULL macros ---------------------------
+
+// Helpers for CHECK_NOTNULL(). Two are necessary to support both raw pointers
+// and smart pointers.
+template <typename T>
+T& CheckNotNullCommon(const char *file, int line, const char *names, T& t) {
+  if (t == NULL) {
+    LogMessageFatal(file, line, std::string(names));
+  }
+  return t;
+}
+
+template <typename T>
+T* CheckNotNull(const char *file, int line, const char *names, T* t) {
+  return CheckNotNullCommon(file, line, names, t);
+}
+
+template <typename T>
+T& CheckNotNull(const char *file, int line, const char *names, T& t) {
+  return CheckNotNullCommon(file, line, names, t);
+}
+
+// Check that a pointer is not null.
+#define CHECK_NOTNULL(val) \
+  CheckNotNull(__FILE__, __LINE__, "'" #val "' Must be non NULL", (val))
+
+#ifndef NDEBUG
+// Debug only version of CHECK_NOTNULL
+#define DCHECK_NOTNULL(val) \
+  CheckNotNull(__FILE__, __LINE__, "'" #val "' Must be non NULL", (val))
+#else
+// Optimized version - generates no code.
+#define DCHECK_NOTNULL(val) if (false)\
+  CheckNotNull(__FILE__, __LINE__, "'" #val "' Must be non NULL", (val))
+#endif  // NDEBUG
+
+#include "ceres/internal/reenable_warnings.h"
+
+#endif  // CERES_INTERNAL_MINIGLOG_GLOG_LOGGING_H_
diff --git a/internal/ceres/minimizer.cc b/internal/ceres/minimizer.cc
new file mode 100644
index 0000000..f596033
--- /dev/null
+++ b/internal/ceres/minimizer.cc
@@ -0,0 +1,87 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/line_search_minimizer.h"
+#include "ceres/minimizer.h"
+#include "ceres/trust_region_minimizer.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+Minimizer* Minimizer::Create(MinimizerType minimizer_type) {
+  if (minimizer_type == TRUST_REGION) {
+    return new TrustRegionMinimizer;
+  }
+
+  if (minimizer_type == LINE_SEARCH) {
+    return new LineSearchMinimizer;
+  }
+
+  LOG(FATAL) << "Unknown minimizer_type: " << minimizer_type;
+  return NULL;
+}
+
+
+Minimizer::~Minimizer() {}
+
+bool Minimizer::RunCallbacks(const Minimizer::Options& options,
+                             const IterationSummary& iteration_summary,
+                             Solver::Summary* summary) {
+  const bool is_not_silent = !options.is_silent;
+  CallbackReturnType status = SOLVER_CONTINUE;
+  int i = 0;
+  while (status == SOLVER_CONTINUE && i < options.callbacks.size()) {
+    status = (*options.callbacks[i])(iteration_summary);
+    ++i;
+  }
+  switch (status) {
+    case SOLVER_CONTINUE:
+      return true;
+    case SOLVER_TERMINATE_SUCCESSFULLY:
+      summary->termination_type = USER_SUCCESS;
+      summary->message =
+          "User callback returned SOLVER_TERMINATE_SUCCESSFULLY.";
+      VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
+      return false;
+    case SOLVER_ABORT:
+      summary->termination_type = USER_FAILURE;
+      summary->message = "User callback returned SOLVER_ABORT.";
+      VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
+      return false;
+    default:
+      LOG(FATAL) << "Unknown type of user callback status";
+  }
+  return false;
+}
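+
+// Illustrative sketch of a user callback consumed by RunCallbacks() above
+// (the class name is assumed for the example, not part of the library):
+//
+//   class StopAfterNIterations : public IterationCallback {
+//    public:
+//     explicit StopAfterNIterations(int n) : n_(n) {}
+//     virtual CallbackReturnType operator()(const IterationSummary& summary) {
+//       return summary.iteration >= n_ ? SOLVER_TERMINATE_SUCCESSFULLY
+//                                      : SOLVER_CONTINUE;
+//     }
+//    private:
+//     int n_;
+//   };
+//
+// Returning SOLVER_TERMINATE_SUCCESSFULLY maps to USER_SUCCESS and
+// SOLVER_ABORT maps to USER_FAILURE in the resulting Solver::Summary.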
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/minimizer.h b/internal/ceres/minimizer.h
new file mode 100644
index 0000000..afdd60d
--- /dev/null
+++ b/internal/ceres/minimizer.h
@@ -0,0 +1,203 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_MINIMIZER_H_
+#define CERES_INTERNAL_MINIMIZER_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+#include "ceres/internal/port.h"
+#include "ceres/iteration_callback.h"
+#include "ceres/solver.h"
+
+namespace ceres {
+namespace internal {
+
+class Evaluator;
+class SparseMatrix;
+class TrustRegionStrategy;
+class CoordinateDescentMinimizer;
+class LinearSolver;
+
+// Interface for non-linear least squares solvers.
+class Minimizer {
+ public:
+  // Options struct to control the behaviour of the Minimizer. Please
+  // see solver.h for detailed information about the meaning and
+  // default values of each of these parameters.
+  struct Options {
+    Options() {
+      Init(Solver::Options());
+    }
+
+    explicit Options(const Solver::Options& options) {
+      Init(options);
+    }
+
+    void Init(const Solver::Options& options) {
+      num_threads = options.num_threads;
+      max_num_iterations = options.max_num_iterations;
+      max_solver_time_in_seconds = options.max_solver_time_in_seconds;
+      max_step_solver_retries = 5;
+      gradient_tolerance = options.gradient_tolerance;
+      parameter_tolerance = options.parameter_tolerance;
+      function_tolerance = options.function_tolerance;
+      min_relative_decrease = options.min_relative_decrease;
+      eta = options.eta;
+      jacobi_scaling = options.jacobi_scaling;
+      use_nonmonotonic_steps = options.use_nonmonotonic_steps;
+      max_consecutive_nonmonotonic_steps =
+          options.max_consecutive_nonmonotonic_steps;
+      trust_region_problem_dump_directory =
+          options.trust_region_problem_dump_directory;
+      trust_region_minimizer_iterations_to_dump =
+          options.trust_region_minimizer_iterations_to_dump;
+      trust_region_problem_dump_format_type =
+          options.trust_region_problem_dump_format_type;
+      max_num_consecutive_invalid_steps =
+          options.max_num_consecutive_invalid_steps;
+      min_trust_region_radius = options.min_trust_region_radius;
+      line_search_direction_type = options.line_search_direction_type;
+      line_search_type = options.line_search_type;
+      nonlinear_conjugate_gradient_type =
+          options.nonlinear_conjugate_gradient_type;
+      max_lbfgs_rank = options.max_lbfgs_rank;
+      use_approximate_eigenvalue_bfgs_scaling =
+          options.use_approximate_eigenvalue_bfgs_scaling;
+      line_search_interpolation_type =
+          options.line_search_interpolation_type;
+      min_line_search_step_size = options.min_line_search_step_size;
+      line_search_sufficient_function_decrease =
+          options.line_search_sufficient_function_decrease;
+      max_line_search_step_contraction =
+          options.max_line_search_step_contraction;
+      min_line_search_step_contraction =
+          options.min_line_search_step_contraction;
+      max_num_line_search_step_size_iterations =
+          options.max_num_line_search_step_size_iterations;
+      max_num_line_search_direction_restarts =
+          options.max_num_line_search_direction_restarts;
+      line_search_sufficient_curvature_decrease =
+          options.line_search_sufficient_curvature_decrease;
+      max_line_search_step_expansion =
+          options.max_line_search_step_expansion;
+      inner_iteration_tolerance = options.inner_iteration_tolerance;
+      is_silent = (options.logging_type == SILENT);
+      is_constrained = false;
+      callbacks = options.callbacks;
+    }
+
+    int max_num_iterations;
+    double max_solver_time_in_seconds;
+    int num_threads;
+
+    // Number of times the linear solver should be retried in case of
+    // numerical failure. The retries are done by exponentially scaling up
+    // mu at each retry. This leads to stronger and stronger
+    // regularization, making the linear least squares problem better
+    // conditioned at each retry.
+    int max_step_solver_retries;
+    double gradient_tolerance;
+    double parameter_tolerance;
+    double function_tolerance;
+    double min_relative_decrease;
+    double eta;
+    bool jacobi_scaling;
+    bool use_nonmonotonic_steps;
+    int max_consecutive_nonmonotonic_steps;
+    std::vector<int> trust_region_minimizer_iterations_to_dump;
+    DumpFormatType trust_region_problem_dump_format_type;
+    std::string trust_region_problem_dump_directory;
+    int max_num_consecutive_invalid_steps;
+    double min_trust_region_radius;
+    LineSearchDirectionType line_search_direction_type;
+    LineSearchType line_search_type;
+    NonlinearConjugateGradientType nonlinear_conjugate_gradient_type;
+    int max_lbfgs_rank;
+    bool use_approximate_eigenvalue_bfgs_scaling;
+    LineSearchInterpolationType line_search_interpolation_type;
+    double min_line_search_step_size;
+    double line_search_sufficient_function_decrease;
+    double max_line_search_step_contraction;
+    double min_line_search_step_contraction;
+    int max_num_line_search_step_size_iterations;
+    int max_num_line_search_direction_restarts;
+    double line_search_sufficient_curvature_decrease;
+    double max_line_search_step_expansion;
+    double inner_iteration_tolerance;
+
+    // If true, then all logging is disabled.
+    bool is_silent;
+
+    // Use a bounds constrained optimization algorithm.
+    bool is_constrained;
+
+    // List of callbacks that are executed by the Minimizer at the end
+    // of each iteration.
+    //
+    // The Options struct does not own these pointers.
+    std::vector<IterationCallback*> callbacks;
+
+    // Object responsible for evaluating the cost, residuals and
+    // Jacobian matrix.
+    std::shared_ptr<Evaluator> evaluator;
+
+    // Object responsible for actually computing the trust region
+    // step, and sizing the trust region radius.
+    std::shared_ptr<TrustRegionStrategy> trust_region_strategy;
+
+    // Object holding the Jacobian matrix. It is assumed that the
+    // sparsity structure of the matrix has already been initialized
+    // and will remain constant for the life time of the
+    // optimization.
+    std::shared_ptr<SparseMatrix> jacobian;
+
+    std::shared_ptr<CoordinateDescentMinimizer> inner_iteration_minimizer;
+  };
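+
+  // Construction sketch (illustrative only; the concrete evaluator, trust
+  // region strategy and jacobian objects are created inside the solver, and
+  // the shared_ptr locals below are placeholder names):
+  //
+  //   Minimizer::Options minimizer_options(solver_options);
+  //   minimizer_options.evaluator = evaluator;
+  //   minimizer_options.jacobian = jacobian;
+  //   minimizer_options.trust_region_strategy = strategy;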
+
+  static Minimizer* Create(MinimizerType minimizer_type);
+  static bool RunCallbacks(const Options& options,
+                           const IterationSummary& iteration_summary,
+                           Solver::Summary* summary);
+
+  virtual ~Minimizer();
+  // Note: The minimizer is expected to update the state of the
+  // parameters array every iteration. This is required for the
+  // StateUpdatingCallback to work.
+  virtual void Minimize(const Options& options,
+                        double* parameters,
+                        Solver::Summary* summary) = 0;
+};
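+
+// Rough shape of a concrete implementation (an illustrative sketch only; the
+// actual trust region and line search minimizers live in their own source
+// files, and TakeStep below is a hypothetical helper). Note how `parameters`
+// is updated in place every iteration, as the comment above requires:
+//
+//   class SketchMinimizer : public Minimizer {
+//    public:
+//     virtual void Minimize(const Options& options,
+//                           double* parameters,
+//                           Solver::Summary* summary) {
+//       IterationSummary iteration_summary;
+//       bool converged = false;
+//       while (!converged) {
+//         // Compute a step and write the updated values back into
+//         // `parameters` so that StateUpdatingCallback sees them.
+//         converged = TakeStep(options, parameters, &iteration_summary);
+//         if (!RunCallbacks(options, iteration_summary, summary)) {
+//           return;
+//         }
+//       }
+//     }
+//   };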
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_MINIMIZER_H_
diff --git a/internal/ceres/minimizer_test.cc b/internal/ceres/minimizer_test.cc
new file mode 100644
index 0000000..fe9b15e
--- /dev/null
+++ b/internal/ceres/minimizer_test.cc
@@ -0,0 +1,100 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include "gtest/gtest.h"
+#include "ceres/iteration_callback.h"
+#include "ceres/minimizer.h"
+#include "ceres/solver.h"
+
+namespace ceres {
+namespace internal {
+
+class FakeIterationCallback : public IterationCallback {
+ public:
+  virtual ~FakeIterationCallback() {}
+  virtual CallbackReturnType operator()(const IterationSummary& summary) {
+    return SOLVER_CONTINUE;
+  }
+};
+
+TEST(Minimizer, InitializationCopiesCallbacks) {
+  FakeIterationCallback callback0;
+  FakeIterationCallback callback1;
+
+  Solver::Options solver_options;
+  solver_options.callbacks.push_back(&callback0);
+  solver_options.callbacks.push_back(&callback1);
+
+  Minimizer::Options minimizer_options(solver_options);
+  ASSERT_EQ(2, minimizer_options.callbacks.size());
+
+  EXPECT_EQ(minimizer_options.callbacks[0], &callback0);
+  EXPECT_EQ(minimizer_options.callbacks[1], &callback1);
+}
+
+class AbortingIterationCallback : public IterationCallback {
+ public:
+  virtual ~AbortingIterationCallback() {}
+  virtual CallbackReturnType operator()(const IterationSummary& summary) {
+    return SOLVER_ABORT;
+  }
+};
+
+TEST(Minimizer, UserAbortUpdatesSummaryMessage) {
+  AbortingIterationCallback callback;
+  Solver::Options solver_options;
+  solver_options.callbacks.push_back(&callback);
+  Minimizer::Options minimizer_options(solver_options);
+  Solver::Summary summary;
+  Minimizer::RunCallbacks(minimizer_options, IterationSummary(), &summary);
+  EXPECT_EQ(summary.message, "User callback returned SOLVER_ABORT.");
+}
+
+class SucceedingIterationCallback : public IterationCallback {
+ public:
+  virtual ~SucceedingIterationCallback() {}
+  virtual CallbackReturnType operator()(const IterationSummary& summary) {
+    return SOLVER_TERMINATE_SUCCESSFULLY;
+  }
+};
+
+TEST(Minimizer, UserSuccessUpdatesSummaryMessage) {
+  SucceedingIterationCallback callback;
+  Solver::Options solver_options;
+  solver_options.callbacks.push_back(&callback);
+  Minimizer::Options minimizer_options(solver_options);
+  Solver::Summary summary;
+  Minimizer::RunCallbacks(minimizer_options, IterationSummary(), &summary);
+  EXPECT_EQ(summary.message,
+            "User callback returned SOLVER_TERMINATE_SUCCESSFULLY.");
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/normal_prior.cc b/internal/ceres/normal_prior.cc
new file mode 100644
index 0000000..a3d5d8e
--- /dev/null
+++ b/internal/ceres/normal_prior.cc
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/normal_prior.h"
+
+#include <cstddef>
+#include <vector>
+#include "ceres/internal/eigen.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+
+namespace ceres {
+
+NormalPrior::NormalPrior(const Matrix& A, const Vector& b)
+    : A_(A), b_(b) {
+  CHECK_GT(b_.rows(), 0);
+  CHECK_GT(A_.rows(), 0);
+  CHECK_EQ(b_.rows(), A_.cols());
+  set_num_residuals(A_.rows());
+  mutable_parameter_block_sizes()->push_back(b_.rows());
+}
+
+bool NormalPrior::Evaluate(double const* const* parameters,
+                           double* residuals,
+                           double** jacobians) const {
+  ConstVectorRef p(parameters[0], parameter_block_sizes()[0]);
+  VectorRef r(residuals, num_residuals());
+  // Ideally the following line would simply read
+  //   r = A_ * (p - b_);
+  // but the extra .eval() is needed to work around a bug in the Eigen
+  // library.
+  r = A_ * (p - b_).eval();
+  if ((jacobians != NULL) && (jacobians[0] != NULL)) {
+    MatrixRef(jacobians[0], num_residuals(), parameter_block_sizes()[0]) = A_;
+  }
+  return true;
+}
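+
+// Usage sketch (illustrative only; `problem`, `x`, `mean` and `stiffness` are
+// placeholder names). A NormalPrior is added to a problem like any other cost
+// function and contributes the residual A * (x - b) with constant Jacobian A:
+//
+//   Matrix stiffness = ...;  // e.g. an inverse square root of a covariance
+//   Vector mean = ...;
+//   problem.AddResidualBlock(new NormalPrior(stiffness, mean), NULL, x);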
+
+}  // namespace ceres
diff --git a/internal/ceres/normal_prior_test.cc b/internal/ceres/normal_prior_test.cc
new file mode 100644
index 0000000..1a51cfd
--- /dev/null
+++ b/internal/ceres/normal_prior_test.cc
@@ -0,0 +1,133 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/normal_prior.h"
+
+#include <cstddef>
+
+#include "gtest/gtest.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/random.h"
+
+namespace ceres {
+namespace internal {
+
+void RandomVector(Vector* v) {
+  for (int r = 0; r < v->rows(); ++r)
+    (*v)[r] = 2 * RandDouble() - 1;
+}
+
+void RandomMatrix(Matrix* m) {
+  for (int r = 0; r < m->rows(); ++r) {
+    for (int c = 0; c < m->cols(); ++c) {
+      (*m)(r, c) = 2 * RandDouble() - 1;
+    }
+  }
+}
+
+TEST(NormalPriorTest, ResidualAtRandomPosition) {
+  srand(5);
+
+  for (int num_rows = 1; num_rows < 5; ++num_rows) {
+    for (int num_cols = 1; num_cols < 5; ++num_cols) {
+      Vector b(num_cols);
+      RandomVector(&b);
+
+      Matrix A(num_rows, num_cols);
+      RandomMatrix(&A);
+
+      double * x = new double[num_cols];
+      for (int i = 0; i < num_cols; ++i)
+        x[i] = 2 * RandDouble() - 1;
+
+      double * jacobian = new double[num_rows * num_cols];
+      Vector residuals(num_rows);
+
+      NormalPrior prior(A, b);
+      prior.Evaluate(&x, residuals.data(), &jacobian);
+
+      // Compare the norm of the residual
+      double residual_diff_norm =
+          (residuals - A * (VectorRef(x, num_cols) - b)).squaredNorm();
+      EXPECT_NEAR(residual_diff_norm, 0, 1e-10);
+
+      // Compare the jacobians
+      MatrixRef J(jacobian, num_rows, num_cols);
+      double jacobian_diff_norm = (J - A).norm();
+      EXPECT_NEAR(jacobian_diff_norm, 0.0, 1e-10);
+
+      delete[] x;
+      delete[] jacobian;
+    }
+  }
+}
+
+TEST(NormalPriorTest, ResidualAtRandomPositionNullJacobians) {
+  srand(5);
+
+  for (int num_rows = 1; num_rows < 5; ++num_rows) {
+    for (int num_cols = 1; num_cols < 5; ++num_cols) {
+      Vector b(num_cols);
+      RandomVector(&b);
+
+      Matrix A(num_rows, num_cols);
+      RandomMatrix(&A);
+
+      double * x = new double[num_cols];
+      for (int i = 0; i < num_cols; ++i)
+        x[i] = 2 * RandDouble() - 1;
+
+      double* jacobians[1];
+      jacobians[0] = NULL;
+
+      Vector residuals(num_rows);
+
+      NormalPrior prior(A, b);
+      prior.Evaluate(&x, residuals.data(), jacobians);
+
+      // Compare the norm of the residual
+      double residual_diff_norm =
+          (residuals - A * (VectorRef(x, num_cols) - b)).squaredNorm();
+      EXPECT_NEAR(residual_diff_norm, 0, 1e-10);
+
+      prior.Evaluate(&x, residuals.data(), NULL);
+      // Compare the norm of the residual
+      residual_diff_norm =
+          (residuals - A * (VectorRef(x, num_cols) - b)).squaredNorm();
+      EXPECT_NEAR(residual_diff_norm, 0, 1e-10);
+
+      delete[] x;
+    }
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/numeric_diff_cost_function_test.cc b/internal/ceres/numeric_diff_cost_function_test.cc
new file mode 100644
index 0000000..105bef5
--- /dev/null
+++ b/internal/ceres/numeric_diff_cost_function_test.cc
@@ -0,0 +1,447 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//         tbennun@gmail.com (Tal Ben-Nun)
+
+#include "ceres/numeric_diff_cost_function.h"
+
+#include <algorithm>
+#include <array>
+#include <cmath>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "ceres/array_utils.h"
+#include "ceres/numeric_diff_test_utils.h"
+#include "ceres/test_util.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+TEST(NumericDiffCostFunction, EasyCaseFunctorCentralDifferences) {
+  std::unique_ptr<CostFunction> cost_function;
+  cost_function.reset(
+      new NumericDiffCostFunction<EasyFunctor,
+                                  CENTRAL,
+                                  3,  /* number of residuals */
+                                  5,  /* size of x1 */
+                                  5   /* size of x2 */>(
+          new EasyFunctor));
+  EasyFunctor functor;
+  functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, CENTRAL);
+}
+
+TEST(NumericDiffCostFunction, EasyCaseFunctorForwardDifferences) {
+  std::unique_ptr<CostFunction> cost_function;
+  cost_function.reset(
+      new NumericDiffCostFunction<EasyFunctor,
+                                  FORWARD,
+                                  3,  /* number of residuals */
+                                  5,  /* size of x1 */
+                                  5   /* size of x2 */>(
+          new EasyFunctor));
+  EasyFunctor functor;
+  functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, FORWARD);
+}
+
+TEST(NumericDiffCostFunction, EasyCaseFunctorRidders) {
+  std::unique_ptr<CostFunction> cost_function;
+  cost_function.reset(
+      new NumericDiffCostFunction<EasyFunctor,
+                                  RIDDERS,
+                                  3,  /* number of residuals */
+                                  5,  /* size of x1 */
+                                  5   /* size of x2 */>(
+          new EasyFunctor));
+  EasyFunctor functor;
+  functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, RIDDERS);
+}
+
+TEST(NumericDiffCostFunction, EasyCaseCostFunctionCentralDifferences) {
+  std::unique_ptr<CostFunction> cost_function;
+  cost_function.reset(
+      new NumericDiffCostFunction<EasyCostFunction,
+                                  CENTRAL,
+                                  3,  /* number of residuals */
+                                  5,  /* size of x1 */
+                                  5   /* size of x2 */>(
+          new EasyCostFunction, TAKE_OWNERSHIP));
+  EasyFunctor functor;
+  functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, CENTRAL);
+}
+
+TEST(NumericDiffCostFunction, EasyCaseCostFunctionForwardDifferences) {
+  std::unique_ptr<CostFunction> cost_function;
+  cost_function.reset(
+      new NumericDiffCostFunction<EasyCostFunction,
+                                  FORWARD,
+                                  3,  /* number of residuals */
+                                  5,  /* size of x1 */
+                                  5   /* size of x2 */>(
+          new EasyCostFunction, TAKE_OWNERSHIP));
+  EasyFunctor functor;
+  functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, FORWARD);
+}
+
+TEST(NumericDiffCostFunction, EasyCaseCostFunctionRidders) {
+  std::unique_ptr<CostFunction> cost_function;
+  cost_function.reset(
+      new NumericDiffCostFunction<EasyCostFunction,
+                                  RIDDERS,
+                                  3,  /* number of residuals */
+                                  5,  /* size of x1 */
+                                  5   /* size of x2 */>(
+          new EasyCostFunction, TAKE_OWNERSHIP));
+  EasyFunctor functor;
+  functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, RIDDERS);
+}
+
+TEST(NumericDiffCostFunction,
+     TranscendentalCaseFunctorCentralDifferences) {
+  std::unique_ptr<CostFunction> cost_function;
+  cost_function.reset(
+      new NumericDiffCostFunction<TranscendentalFunctor,
+                                  CENTRAL,
+                                  2,  /* number of residuals */
+                                  5,  /* size of x1 */
+                                  5   /* size of x2 */>(
+          new TranscendentalFunctor));
+  TranscendentalFunctor functor;
+  functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, CENTRAL);
+}
+
+TEST(NumericDiffCostFunction,
+     TranscendentalCaseFunctorForwardDifferences) {
+  std::unique_ptr<CostFunction> cost_function;
+  cost_function.reset(
+      new NumericDiffCostFunction<TranscendentalFunctor,
+                                  FORWARD,
+                                  2,  /* number of residuals */
+                                  5,  /* size of x1 */
+                                  5   /* size of x2 */>(
+          new TranscendentalFunctor));
+  TranscendentalFunctor functor;
+  functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, FORWARD);
+}
+
+TEST(NumericDiffCostFunction,
+     TranscendentalCaseFunctorRidders) {
+  NumericDiffOptions options;
+
+  // Using a smaller initial step size to overcome oscillatory function
+  // behavior.
+  options.ridders_relative_initial_step_size = 1e-3;
+
+  std::unique_ptr<CostFunction> cost_function;
+  cost_function.reset(
+      new NumericDiffCostFunction<TranscendentalFunctor,
+                                  RIDDERS,
+                                  2,  /* number of residuals */
+                                  5,  /* size of x1 */
+                                  5   /* size of x2 */>(
+          new TranscendentalFunctor, TAKE_OWNERSHIP, 2, options));
+  TranscendentalFunctor functor;
+  functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, RIDDERS);
+}
+
+TEST(NumericDiffCostFunction,
+     TranscendentalCaseCostFunctionCentralDifferences) {
+  std::unique_ptr<CostFunction> cost_function;
+  cost_function.reset(
+      new NumericDiffCostFunction<TranscendentalCostFunction,
+                                  CENTRAL,
+                                  2,  /* number of residuals */
+                                  5,  /* size of x1 */
+                                  5   /* size of x2 */>(
+          new TranscendentalCostFunction, TAKE_OWNERSHIP));
+  TranscendentalFunctor functor;
+  functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, CENTRAL);
+}
+
+TEST(NumericDiffCostFunction,
+     TranscendentalCaseCostFunctionForwardDifferences) {
+  std::unique_ptr<CostFunction> cost_function;
+  cost_function.reset(
+      new NumericDiffCostFunction<TranscendentalCostFunction,
+                                  FORWARD,
+                                  2,  /* number of residuals */
+                                  5,  /* size of x1 */
+                                  5   /* size of x2 */>(
+          new TranscendentalCostFunction, TAKE_OWNERSHIP));
+  TranscendentalFunctor functor;
+  functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, FORWARD);
+}
+
+TEST(NumericDiffCostFunction,
+     TranscendentalCaseCostFunctionRidders) {
+  NumericDiffOptions options;
+
+  // Using a smaller initial step size to overcome oscillatory function
+  // behavior.
+  options.ridders_relative_initial_step_size = 1e-3;
+
+  std::unique_ptr<CostFunction> cost_function;
+  cost_function.reset(
+      new NumericDiffCostFunction<TranscendentalCostFunction,
+                                  RIDDERS,
+                                  2,  /* number of residuals */
+                                  5,  /* size of x1 */
+                                  5   /* size of x2 */>(
+          new TranscendentalCostFunction, TAKE_OWNERSHIP, 2, options));
+  TranscendentalFunctor functor;
+  functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, RIDDERS);
+}
+
+template<int num_rows, int num_cols>
+class SizeTestingCostFunction : public SizedCostFunction<num_rows, num_cols> {
+ public:
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** jacobians) const {
+    return true;
+  }
+};
+
+// As described in
+// http://forum.kde.org/viewtopic.php?f=74&t=98536#p210774
+// Eigen3 has restrictions on the Row/Column major storage of vectors,
+// depending on their dimensions. This test ensures that the correct
+// templates are instantiated for various shapes of the Jacobian
+// matrix.
+TEST(NumericDiffCostFunction, EigenRowMajorColMajorTest) {
+  std::unique_ptr<CostFunction> cost_function;
+  cost_function.reset(
+      new NumericDiffCostFunction<SizeTestingCostFunction<1,1>,  CENTRAL, 1, 1>(
+          new SizeTestingCostFunction<1,1>, ceres::TAKE_OWNERSHIP));
+
+  cost_function.reset(
+      new NumericDiffCostFunction<SizeTestingCostFunction<2,1>,  CENTRAL, 2, 1>(
+          new SizeTestingCostFunction<2,1>, ceres::TAKE_OWNERSHIP));
+
+  cost_function.reset(
+      new NumericDiffCostFunction<SizeTestingCostFunction<1,2>,  CENTRAL, 1, 2>(
+          new SizeTestingCostFunction<1,2>, ceres::TAKE_OWNERSHIP));
+
+  cost_function.reset(
+      new NumericDiffCostFunction<SizeTestingCostFunction<2,2>,  CENTRAL, 2, 2>(
+          new SizeTestingCostFunction<2,2>, ceres::TAKE_OWNERSHIP));
+
+  cost_function.reset(
+      new NumericDiffCostFunction<EasyFunctor, CENTRAL, ceres::DYNAMIC, 1, 1>(
+          new EasyFunctor, TAKE_OWNERSHIP, 1));
+
+  cost_function.reset(
+      new NumericDiffCostFunction<EasyFunctor, CENTRAL, ceres::DYNAMIC, 1, 1>(
+          new EasyFunctor, TAKE_OWNERSHIP, 2));
+
+  cost_function.reset(
+      new NumericDiffCostFunction<EasyFunctor, CENTRAL, ceres::DYNAMIC, 1, 2>(
+          new EasyFunctor, TAKE_OWNERSHIP, 1));
+
+  cost_function.reset(
+      new NumericDiffCostFunction<EasyFunctor, CENTRAL, ceres::DYNAMIC, 1, 2>(
+          new EasyFunctor, TAKE_OWNERSHIP, 2));
+
+  cost_function.reset(
+      new NumericDiffCostFunction<EasyFunctor, CENTRAL, ceres::DYNAMIC, 2, 1>(
+          new EasyFunctor, TAKE_OWNERSHIP, 1));
+
+  cost_function.reset(
+      new NumericDiffCostFunction<EasyFunctor, CENTRAL, ceres::DYNAMIC, 2, 1>(
+          new EasyFunctor, TAKE_OWNERSHIP, 2));
+}
+
+TEST(NumericDiffCostFunction,
+     EasyCaseFunctorCentralDifferencesAndDynamicNumResiduals) {
+  std::unique_ptr<CostFunction> cost_function;
+  cost_function.reset(
+      new NumericDiffCostFunction<EasyFunctor,
+                                  CENTRAL,
+                                  ceres::DYNAMIC,
+                                  5,  /* size of x1 */
+                                  5   /* size of x2 */>(
+                                      new EasyFunctor, TAKE_OWNERSHIP, 3));
+  EasyFunctor functor;
+  functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, CENTRAL);
+}
+
+TEST(NumericDiffCostFunction, ExponentialFunctorRidders) {
+  std::unique_ptr<CostFunction> cost_function;
+  cost_function.reset(
+      new NumericDiffCostFunction<ExponentialFunctor,
+                                  RIDDERS,
+                                  1,  /* number of residuals */
+                                  1   /* size of x1 */>(
+             new ExponentialFunctor));
+  ExponentialFunctor functor;
+  functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function);
+}
+
+TEST(NumericDiffCostFunction, ExponentialCostFunctionRidders) {
+  std::unique_ptr<CostFunction> cost_function;
+  cost_function.reset(
+      new NumericDiffCostFunction<ExponentialCostFunction,
+                                  RIDDERS,
+                                  1,  /* number of residuals */
+                                  1   /* size of x1 */>(
+             new ExponentialCostFunction));
+  ExponentialFunctor functor;
+  functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function);
+}
+
+TEST(NumericDiffCostFunction, RandomizedFunctorRidders) {
+  std::unique_ptr<CostFunction> cost_function;
+  NumericDiffOptions options;
+  // A larger initial step size is chosen to produce robust results in the
+  // presence of random noise.
+  options.ridders_relative_initial_step_size = 10.0;
+
+  cost_function.reset(
+      new NumericDiffCostFunction<RandomizedFunctor,
+                                  RIDDERS,
+                                  1,  /* number of residuals */
+                                  1   /* size of x1 */>(
+             new RandomizedFunctor(kNoiseFactor, kRandomSeed), TAKE_OWNERSHIP,
+             1, options));
+  RandomizedFunctor functor(kNoiseFactor, kRandomSeed);
+  functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function);
+}
+
+TEST(NumericDiffCostFunction, RandomizedCostFunctionRidders) {
+  std::unique_ptr<CostFunction> cost_function;
+  NumericDiffOptions options;
+  // A larger initial step size is chosen to produce robust results in the
+  // presence of random noise.
+  options.ridders_relative_initial_step_size = 10.0;
+
+  cost_function.reset(
+      new NumericDiffCostFunction<RandomizedCostFunction,
+                                  RIDDERS,
+                                  1,  /* number of residuals */
+                                  1   /* size of x1 */>(
+             new RandomizedCostFunction(kNoiseFactor, kRandomSeed),
+             TAKE_OWNERSHIP, 1, options));
+  RandomizedFunctor functor(kNoiseFactor, kRandomSeed);
+  functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function);
+}
+
+struct OnlyFillsOneOutputFunctor {
+  bool operator()(const double* x, double* output) const {
+    output[0] = x[0];
+    return true;
+  }
+};
+
+TEST(NumericDiffCostFunction, PartiallyFilledResidualShouldFailEvaluation) {
+  double parameter = 1.0;
+  double jacobian[2];
+  double residuals[2];
+  double* parameters[] = {&parameter};
+  double* jacobians[] = {jacobian};
+
+  std::unique_ptr<CostFunction> cost_function(
+      new NumericDiffCostFunction<OnlyFillsOneOutputFunctor, CENTRAL, 2, 1>(
+          new OnlyFillsOneOutputFunctor));
+  InvalidateArray(2, jacobian);
+  InvalidateArray(2, residuals);
+  EXPECT_TRUE(cost_function->Evaluate(parameters, residuals, jacobians));
+  EXPECT_FALSE(IsArrayValid(2, residuals));
+  InvalidateArray(2, residuals);
+  EXPECT_TRUE(cost_function->Evaluate(parameters, residuals, NULL));
+  // We only test the residuals here: the Jacobians are computed by finite
+  // differencing the residuals, so unless a validation step were added after
+  // every residual evaluation inside NumericDiffCostFunction, there is no
+  // way to guarantee that the Jacobian array ends up invalid.
+  EXPECT_FALSE(IsArrayValid(2, residuals));
+}
+
+TEST(NumericDiffCostFunction, ParameterBlockConstant) {
+  constexpr int kNumResiduals = 3;
+  constexpr int kX1 = 5;
+  constexpr int kX2 = 5;
+
+  std::unique_ptr<CostFunction> cost_function;
+  cost_function.reset(new NumericDiffCostFunction<EasyFunctor,
+                                                  CENTRAL,
+                                                  kNumResiduals,
+                                                  kX1,
+                                                  kX2>(new EasyFunctor));
+
+  // Prepare the parameters and residuals.
+  std::array<double, kX1> x1{1e-64, 2.0, 3.0, 4.0, 5.0};
+  std::array<double, kX2> x2{9.0, 9.0, 5.0, 5.0, 1.0};
+  std::array<double*, 2> parameter_blocks{x1.data(), x2.data()};
+
+  std::vector<double> residuals(kNumResiduals, -100000);
+
+  // Evaluate the full jacobian.
+  std::vector<std::vector<double>> jacobian_full_vect(2);
+  jacobian_full_vect[0].resize(kNumResiduals * kX1, -100000);
+  jacobian_full_vect[1].resize(kNumResiduals * kX2, -100000);
+  {
+    std::array<double*, 2> jacobian{jacobian_full_vect[0].data(),
+                                    jacobian_full_vect[1].data()};
+    ASSERT_TRUE(cost_function->Evaluate(
+        parameter_blocks.data(), residuals.data(), jacobian.data()));
+  }
+
+  // Evaluate and check jacobian when first parameter block is constant.
+  {
+    std::vector<double> jacobian_vect(kNumResiduals * kX2, -100000);
+    std::array<double*, 2> jacobian{nullptr, jacobian_vect.data()};
+
+    ASSERT_TRUE(cost_function->Evaluate(
+        parameter_blocks.data(), residuals.data(), jacobian.data()));
+
+    for (int i = 0; i < kNumResiduals * kX2; ++i) {
+      EXPECT_DOUBLE_EQ(jacobian_full_vect[1][i], jacobian_vect[i]);
+    }
+  }
+
+  // Evaluate and check jacobian when second parameter block is constant.
+  {
+    std::vector<double> jacobian_vect(kNumResiduals * kX1, -100000);
+    std::array<double*, 2> jacobian{jacobian_vect.data(), nullptr};
+
+    ASSERT_TRUE(cost_function->Evaluate(
+        parameter_blocks.data(), residuals.data(), jacobian.data()));
+
+    for (int i = 0; i < kNumResiduals * kX1; ++i) {
+      EXPECT_DOUBLE_EQ(jacobian_full_vect[0][i], jacobian_vect[i]);
+    }
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/numeric_diff_test_utils.cc b/internal/ceres/numeric_diff_test_utils.cc
new file mode 100644
index 0000000..ab1b5f8
--- /dev/null
+++ b/internal/ceres/numeric_diff_test_utils.cc
@@ -0,0 +1,273 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//         tbennun@gmail.com (Tal Ben-Nun)
+
+#include "ceres/numeric_diff_test_utils.h"
+
+#include <algorithm>
+#include <cmath>
+#include "ceres/cost_function.h"
+#include "ceres/test_util.h"
+#include "ceres/types.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+bool EasyFunctor::operator()(const double* x1,
+                             const double* x2,
+                             double* residuals) const {
+  residuals[0] = residuals[1] = residuals[2] = 0;
+  for (int i = 0; i < 5; ++i) {
+    residuals[0] += x1[i] * x2[i];
+    residuals[2] += x2[i] * x2[i];
+  }
+  residuals[1] = residuals[0] * residuals[0];
+  return true;
+}
+
+void EasyFunctor::ExpectCostFunctionEvaluationIsNearlyCorrect(
+    const CostFunction& cost_function,
+    NumericDiffMethodType method) const {
+  // x1[0] is deliberately made small to test the accuracy of the numeric
+  // differentiation near zero.
+  double x1[] = { 1e-64, 2.0, 3.0, 4.0, 5.0 };
+  double x2[] = { 9.0, 9.0, 5.0, 5.0, 1.0 };
+  double *parameters[] = { &x1[0], &x2[0] };
+
+  double dydx1[15];  // 3 x 5, row major.
+  double dydx2[15];  // 3 x 5, row major.
+  double *jacobians[2] = { &dydx1[0], &dydx2[0] };
+
+  double residuals[3] = {-1e-100, -2e-100, -3e-100 };
+
+  ASSERT_TRUE(cost_function.Evaluate(&parameters[0],
+                                     &residuals[0],
+                                     &jacobians[0]));
+
+  double expected_residuals[3];
+  EasyFunctor functor;
+  functor(x1, x2, expected_residuals);
+  EXPECT_EQ(expected_residuals[0], residuals[0]);
+  EXPECT_EQ(expected_residuals[1], residuals[1]);
+  EXPECT_EQ(expected_residuals[2], residuals[2]);
+
+  double tolerance = 0.0;
+  switch (method) {
+    default:
+    case CENTRAL:
+      tolerance = 3e-9;
+      break;
+
+    case FORWARD:
+      tolerance = 2e-5;
+      break;
+
+    case RIDDERS:
+      tolerance = 1e-13;
+      break;
+  }
+
+  for (int i = 0; i < 5; ++i) {
+    ExpectClose(x2[i],                    dydx1[5 * 0 + i], tolerance);  // y1
+    ExpectClose(x1[i],                    dydx2[5 * 0 + i], tolerance);
+    ExpectClose(2 * x2[i] * residuals[0], dydx1[5 * 1 + i], tolerance);  // y2
+    ExpectClose(2 * x1[i] * residuals[0], dydx2[5 * 1 + i], tolerance);
+    ExpectClose(0.0,                      dydx1[5 * 2 + i], tolerance);  // y3
+    ExpectClose(2 * x2[i],                dydx2[5 * 2 + i], tolerance);
+  }
+}
+
+bool TranscendentalFunctor::operator()(const double* x1,
+                                       const double* x2,
+                                       double* residuals) const {
+  double x1x2 = 0;
+  for (int i = 0; i < 5; ++i) {
+    x1x2 += x1[i] * x2[i];
+  }
+  residuals[0] = sin(x1x2);
+  residuals[1] = exp(-x1x2 / 10);
+  return true;
+}
+
+void TranscendentalFunctor::ExpectCostFunctionEvaluationIsNearlyCorrect(
+    const CostFunction& cost_function,
+    NumericDiffMethodType method) const {
+
+  struct TestParameterBlocks {
+    double x1[5];
+    double x2[5];
+  };
+
+  std::vector<TestParameterBlocks> kTests = {
+    { { 1.0, 2.0, 3.0, 4.0, 5.0 },  // No zeros.
+      { 9.0, 9.0, 5.0, 5.0, 1.0 },
+    },
+    { { 0.0, 2.0, 3.0, 0.0, 5.0 },  // Some zeros x1.
+      { 9.0, 9.0, 5.0, 5.0, 1.0 },
+    },
+    { { 1.0, 2.0, 3.0, 1.0, 5.0 },  // Some zeros x2.
+      { 0.0, 9.0, 0.0, 5.0, 0.0 },
+    },
+    { { 0.0, 0.0, 0.0, 0.0, 0.0 },  // All zeros x1.
+      { 9.0, 9.0, 5.0, 5.0, 1.0 },
+    },
+    { { 1.0, 2.0, 3.0, 4.0, 5.0 },  // All zeros x2.
+      { 0.0, 0.0, 0.0, 0.0, 0.0 },
+    },
+    { { 0.0, 0.0, 0.0, 0.0, 0.0 },  // All zeros.
+      { 0.0, 0.0, 0.0, 0.0, 0.0 },
+    },
+  };
+
+  for (int k = 0; k < kTests.size(); ++k) {
+    double *x1 = &(kTests[k].x1[0]);
+    double *x2 = &(kTests[k].x2[0]);
+    double *parameters[] = { x1, x2 };
+
+    double dydx1[10];
+    double dydx2[10];
+    double *jacobians[2] = { &dydx1[0], &dydx2[0] };
+
+    double residuals[2];
+
+    ASSERT_TRUE(cost_function.Evaluate(&parameters[0],
+                                       &residuals[0],
+                                       &jacobians[0]));
+    double x1x2 = 0;
+    for (int i = 0; i < 5; ++i) {
+      x1x2 += x1[i] * x2[i];
+    }
+
+    double tolerance = 0.0;
+    switch (method) {
+      default:
+      case CENTRAL:
+        tolerance = 2e-7;
+        break;
+
+      case FORWARD:
+        tolerance = 2e-5;
+        break;
+
+      case RIDDERS:
+        tolerance = 3e-12;
+        break;
+    }
+
+    for (int i = 0; i < 5; ++i) {
+      ExpectClose( x2[i] * cos(x1x2),              dydx1[5 * 0 + i], tolerance);
+      ExpectClose( x1[i] * cos(x1x2),              dydx2[5 * 0 + i], tolerance);
+      ExpectClose(-x2[i] * exp(-x1x2 / 10.) / 10., dydx1[5 * 1 + i], tolerance);
+      ExpectClose(-x1[i] * exp(-x1x2 / 10.) / 10., dydx2[5 * 1 + i], tolerance);
+    }
+  }
+}
+
+bool ExponentialFunctor::operator()(const double* x1,
+                                    double* residuals) const {
+  residuals[0] = exp(x1[0]);
+  return true;
+}
+
+void ExponentialFunctor::ExpectCostFunctionEvaluationIsNearlyCorrect(
+    const CostFunction& cost_function) const {
+  // Evaluating the functor at specific points for testing.
+  std::vector<double> kTests = { 1.0, 2.0, 3.0, 4.0, 5.0 };
+
+  // Minimal tolerance w.r.t. the cost function and the tests.
+  const double kTolerance = 2e-14;
+
+  for (int k = 0; k < kTests.size(); ++k) {
+    double *parameters[] = { &kTests[k] };
+    double dydx;
+    double *jacobians[1] = { &dydx };
+    double residual;
+
+    ASSERT_TRUE(cost_function.Evaluate(&parameters[0],
+                                       &residual,
+                                       &jacobians[0]));
+
+    double expected_result = exp(kTests[k]);
+
+    // Expect residual to be close to exp(x).
+    ExpectClose(residual, expected_result, kTolerance);
+
+    // Check evaluated differences. dydx should also be close to exp(x).
+    ExpectClose(dydx, expected_result, kTolerance);
+  }
+}
+
+bool RandomizedFunctor::operator()(const double* x1,
+                                   double* residuals) const {
+  double random_value = static_cast<double>(rand()) /
+      static_cast<double>(RAND_MAX);
+
+  // Normalize noise to [-factor, factor].
+  random_value *= 2.0;
+  random_value -= 1.0;
+  random_value *= noise_factor_;
+
+  residuals[0] = x1[0] * x1[0] + random_value;
+  return true;
+}
+
+void RandomizedFunctor::ExpectCostFunctionEvaluationIsNearlyCorrect(
+    const CostFunction& cost_function) const {
+  std::vector<double> kTests = { 0.0, 1.0, 3.0, 4.0, 50.0 };
+
+  const double kTolerance = 2e-4;
+
+  // Initialize random number generator with given seed.
+  srand(random_seed_);
+
+  for (int k = 0; k < kTests.size(); ++k) {
+    double *parameters[] = { &kTests[k] };
+    double dydx;
+    double *jacobians[1] = { &dydx };
+    double residual;
+
+    ASSERT_TRUE(cost_function.Evaluate(&parameters[0],
+                                       &residual,
+                                       &jacobians[0]));
+
+    // Expect residual to be close to x^2 w.r.t. noise factor.
+    ExpectClose(residual, kTests[k] * kTests[k], noise_factor_);
+
+    // Check evaluated differences. (dy/dx = ~2x)
+    ExpectClose(dydx, 2 * kTests[k], kTolerance);
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/numeric_diff_test_utils.h b/internal/ceres/numeric_diff_test_utils.h
new file mode 100644
index 0000000..2a551d3
--- /dev/null
+++ b/internal/ceres/numeric_diff_test_utils.h
@@ -0,0 +1,152 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_NUMERIC_DIFF_TEST_UTILS_H_
+#define CERES_INTERNAL_NUMERIC_DIFF_TEST_UTILS_H_
+
+#include "ceres/cost_function.h"
+#include "ceres/sized_cost_function.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+// Noise factor for randomized cost function.
+static const double kNoiseFactor = 0.01;
+
+// Default random seed for randomized cost function.
+static const unsigned int kRandomSeed = 1234;
+
+// y1 = x1'x2      -> dy1/dx1 = x2,               dy1/dx2 = x1
+// y2 = (x1'x2)^2  -> dy2/dx1 = 2 * x2 * (x1'x2), dy2/dx2 = 2 * x1 * (x1'x2)
+// y3 = x2'x2      -> dy3/dx1 = 0,                dy3/dx2 = 2 * x2
+class EasyFunctor {
+ public:
+  bool operator()(const double* x1, const double* x2, double* residuals) const;
+  void ExpectCostFunctionEvaluationIsNearlyCorrect(
+      const CostFunction& cost_function,
+      NumericDiffMethodType method) const;
+};
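+
+// The functor above is exercised through NumericDiffCostFunction in
+// numeric_diff_cost_function_test.cc, along the lines of
+//
+//   NumericDiffCostFunction<EasyFunctor,
+//                           CENTRAL,
+//                           3,  /* number of residuals */
+//                           5,  /* size of x1 */
+//                           5   /* size of x2 */>
+//       cost_function(new EasyFunctor);
+//
+// and ExpectCostFunctionEvaluationIsNearlyCorrect() then compares the
+// numerically differentiated Jacobians against the closed form derivatives
+// listed above.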
+
+class EasyCostFunction : public SizedCostFunction<3, 5, 5> {
+ public:
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** /* not used */) const {
+    return functor_(parameters[0], parameters[1], residuals);
+  }
+
+ private:
+  EasyFunctor functor_;
+};
+
+// y1 = sin(x1'x2)
+// y2 = exp(-x1'x2 / 10)
+//
+// dy1/dx1 =  x2 * cos(x1'x2),            dy1/dx2 =  x1 * cos(x1'x2)
+// dy2/dx1 = -x2 * exp(-x1'x2 / 10) / 10, dy2/dx2 = -x1 * exp(-x1'x2 / 10) / 10
+class TranscendentalFunctor {
+ public:
+  bool operator()(const double* x1, const double* x2, double* residuals) const;
+  void ExpectCostFunctionEvaluationIsNearlyCorrect(
+      const CostFunction& cost_function,
+      NumericDiffMethodType method) const;
+};
+
+class TranscendentalCostFunction : public SizedCostFunction<2, 5, 5> {
+ public:
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** /* not used */) const {
+    return functor_(parameters[0], parameters[1], residuals);
+  }
+ private:
+  TranscendentalFunctor functor_;
+};
+
+// y = exp(x), dy/dx = exp(x)
+class ExponentialFunctor {
+ public:
+  bool operator()(const double* x1, double* residuals) const;
+  void ExpectCostFunctionEvaluationIsNearlyCorrect(
+      const CostFunction& cost_function) const;
+};
+
+class ExponentialCostFunction : public SizedCostFunction<1, 1> {
+ public:
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** /* not used */) const {
+    return functor_(parameters[0], residuals);
+  }
+
+ private:
+  ExponentialFunctor functor_;
+};
+
+// Test adaptive numeric differentiation by synthetically adding random noise
+// to a functor.
+// y = x^2 + [random noise], dy/dx ~ 2x
+class RandomizedFunctor {
+ public:
+  RandomizedFunctor(double noise_factor, unsigned int random_seed)
+      : noise_factor_(noise_factor), random_seed_(random_seed) {
+  }
+
+  bool operator()(const double* x1, double* residuals) const;
+  void ExpectCostFunctionEvaluationIsNearlyCorrect(
+      const CostFunction& cost_function) const;
+
+ private:
+  double noise_factor_;
+  unsigned int random_seed_;
+};
+
+class RandomizedCostFunction : public SizedCostFunction<1, 1> {
+ public:
+  RandomizedCostFunction(double noise_factor, unsigned int random_seed)
+      : functor_(noise_factor, random_seed) {
+  }
+
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** /* not used */) const {
+    return functor_(parameters[0], residuals);
+  }
+
+ private:
+  RandomizedFunctor functor_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_NUMERIC_DIFF_TEST_UTILS_H_
diff --git a/internal/ceres/ordered_groups_test.cc b/internal/ceres/ordered_groups_test.cc
new file mode 100644
index 0000000..8cf4324
--- /dev/null
+++ b/internal/ceres/ordered_groups_test.cc
@@ -0,0 +1,232 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/ordered_groups.h"
+
+#include <cstddef>
+#include <vector>
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+TEST(OrderedGroups, EmptyOrderedGroupBehavesCorrectly) {
+  ParameterBlockOrdering ordering;
+  EXPECT_EQ(ordering.NumGroups(), 0);
+  EXPECT_EQ(ordering.NumElements(), 0);
+  EXPECT_EQ(ordering.GroupSize(1), 0);
+  double x;
+  EXPECT_EQ(ordering.GroupId(&x), -1);
+  EXPECT_FALSE(ordering.Remove(&x));
+}
+
+TEST(OrderedGroups, EverythingInOneGroup) {
+  ParameterBlockOrdering ordering;
+  double x[3];
+  ordering.AddElementToGroup(x, 1);
+  ordering.AddElementToGroup(x + 1, 1);
+  ordering.AddElementToGroup(x + 2, 1);
+  ordering.AddElementToGroup(x, 1);
+
+  EXPECT_EQ(ordering.NumGroups(), 1);
+  EXPECT_EQ(ordering.NumElements(), 3);
+  EXPECT_EQ(ordering.GroupSize(1), 3);
+  EXPECT_EQ(ordering.GroupSize(0), 0);
+  EXPECT_EQ(ordering.GroupId(x), 1);
+  EXPECT_EQ(ordering.GroupId(x + 1), 1);
+  EXPECT_EQ(ordering.GroupId(x + 2), 1);
+
+  ordering.Remove(x);
+  EXPECT_EQ(ordering.NumGroups(), 1);
+  EXPECT_EQ(ordering.NumElements(), 2);
+  EXPECT_EQ(ordering.GroupSize(1), 2);
+  EXPECT_EQ(ordering.GroupSize(0), 0);
+
+  EXPECT_EQ(ordering.GroupId(x), -1);
+  EXPECT_EQ(ordering.GroupId(x + 1), 1);
+  EXPECT_EQ(ordering.GroupId(x + 2), 1);
+}
+
+TEST(OrderedGroups, StartInOneGroupAndThenSplit) {
+  ParameterBlockOrdering ordering;
+  double x[3];
+  ordering.AddElementToGroup(x, 1);
+  ordering.AddElementToGroup(x + 1, 1);
+  ordering.AddElementToGroup(x + 2, 1);
+  ordering.AddElementToGroup(x, 1);
+
+  EXPECT_EQ(ordering.NumGroups(), 1);
+  EXPECT_EQ(ordering.NumElements(), 3);
+  EXPECT_EQ(ordering.GroupSize(1), 3);
+  EXPECT_EQ(ordering.GroupSize(0), 0);
+  EXPECT_EQ(ordering.GroupId(x), 1);
+  EXPECT_EQ(ordering.GroupId(x + 1), 1);
+  EXPECT_EQ(ordering.GroupId(x + 2), 1);
+
+  ordering.AddElementToGroup(x, 5);
+  EXPECT_EQ(ordering.NumGroups(), 2);
+  EXPECT_EQ(ordering.NumElements(), 3);
+  EXPECT_EQ(ordering.GroupSize(1), 2);
+  EXPECT_EQ(ordering.GroupSize(5), 1);
+  EXPECT_EQ(ordering.GroupSize(0), 0);
+
+  EXPECT_EQ(ordering.GroupId(x), 5);
+  EXPECT_EQ(ordering.GroupId(x + 1), 1);
+  EXPECT_EQ(ordering.GroupId(x + 2), 1);
+}
+
+TEST(OrderedGroups, AddAndRemoveEveryThingFromOneGroup) {
+  ParameterBlockOrdering ordering;
+  double x[3];
+  ordering.AddElementToGroup(x, 1);
+  ordering.AddElementToGroup(x + 1, 1);
+  ordering.AddElementToGroup(x + 2, 1);
+  ordering.AddElementToGroup(x, 1);
+
+  EXPECT_EQ(ordering.NumGroups(), 1);
+  EXPECT_EQ(ordering.NumElements(), 3);
+  EXPECT_EQ(ordering.GroupSize(1), 3);
+  EXPECT_EQ(ordering.GroupSize(0), 0);
+  EXPECT_EQ(ordering.GroupId(x), 1);
+  EXPECT_EQ(ordering.GroupId(x + 1), 1);
+  EXPECT_EQ(ordering.GroupId(x + 2), 1);
+
+  ordering.AddElementToGroup(x, 5);
+  ordering.AddElementToGroup(x + 1, 5);
+  ordering.AddElementToGroup(x + 2, 5);
+  EXPECT_EQ(ordering.NumGroups(), 1);
+  EXPECT_EQ(ordering.NumElements(), 3);
+  EXPECT_EQ(ordering.GroupSize(1), 0);
+  EXPECT_EQ(ordering.GroupSize(5), 3);
+  EXPECT_EQ(ordering.GroupSize(0), 0);
+
+  EXPECT_EQ(ordering.GroupId(x), 5);
+  EXPECT_EQ(ordering.GroupId(x + 1), 5);
+  EXPECT_EQ(ordering.GroupId(x + 2), 5);
+}
+
+TEST(OrderedGroups, ReverseOrdering) {
+  ParameterBlockOrdering ordering;
+  double x[3];
+  ordering.AddElementToGroup(x, 1);
+  ordering.AddElementToGroup(x + 1, 2);
+  ordering.AddElementToGroup(x + 2, 2);
+
+  EXPECT_EQ(ordering.NumGroups(), 2);
+  EXPECT_EQ(ordering.NumElements(), 3);
+  EXPECT_EQ(ordering.GroupSize(1), 1);
+  EXPECT_EQ(ordering.GroupSize(2), 2);
+  EXPECT_EQ(ordering.GroupId(x), 1);
+  EXPECT_EQ(ordering.GroupId(x + 1), 2);
+  EXPECT_EQ(ordering.GroupId(x + 2), 2);
+
+  ordering.Reverse();
+
+  EXPECT_EQ(ordering.NumGroups(), 2);
+  EXPECT_EQ(ordering.NumElements(), 3);
+  EXPECT_EQ(ordering.GroupSize(3), 1);
+  EXPECT_EQ(ordering.GroupSize(2), 2);
+  EXPECT_EQ(ordering.GroupId(x), 3);
+  EXPECT_EQ(ordering.GroupId(x + 1), 2);
+  EXPECT_EQ(ordering.GroupId(x + 2), 2);
+}
+
+TEST(OrderedGroups, ReverseOrderingWithEmptyOrderedGroups) {
+  ParameterBlockOrdering ordering;
+  // This should be a no-op.
+  ordering.Reverse();
+
+  // Ensure the properties of an empty OrderedGroups still hold after Reverse().
+  EXPECT_EQ(ordering.NumGroups(), 0);
+  EXPECT_EQ(ordering.NumElements(), 0);
+  EXPECT_EQ(ordering.GroupSize(1), 0);
+  double x;
+  EXPECT_EQ(ordering.GroupId(&x), -1);
+  EXPECT_FALSE(ordering.Remove(&x));
+}
+
+TEST(OrderedGroups, BulkRemove) {
+  ParameterBlockOrdering ordering;
+  double x[3];
+  ordering.AddElementToGroup(x, 1);
+  ordering.AddElementToGroup(x + 1, 2);
+  ordering.AddElementToGroup(x + 2, 2);
+
+  std::vector<double*> elements_to_remove;
+  elements_to_remove.push_back(x);
+  elements_to_remove.push_back(x + 2);
+
+  EXPECT_EQ(ordering.Remove(elements_to_remove), 2);
+  EXPECT_EQ(ordering.NumElements(), 1);
+  EXPECT_EQ(ordering.GroupId(x), -1);
+  EXPECT_EQ(ordering.GroupId(x + 1), 2);
+  EXPECT_EQ(ordering.GroupId(x + 2), -1);
+}
+
+TEST(OrderedGroups, BulkRemoveWithNoElements) {
+  ParameterBlockOrdering ordering;
+
+  double x[3];
+  std::vector<double*> elements_to_remove;
+  elements_to_remove.push_back(x);
+  elements_to_remove.push_back(x + 2);
+
+  EXPECT_EQ(ordering.Remove(elements_to_remove), 0);
+
+  ordering.AddElementToGroup(x, 1);
+  ordering.AddElementToGroup(x + 1, 2);
+  ordering.AddElementToGroup(x + 2, 2);
+
+  elements_to_remove.clear();
+  EXPECT_EQ(ordering.Remove(elements_to_remove), 0);
+}
+
+TEST(OrderedGroups, MinNonZeroGroup) {
+  ParameterBlockOrdering ordering;
+  double x[3];
+
+  ordering.AddElementToGroup(x, 1);
+  ordering.AddElementToGroup(x + 1, 1);
+  ordering.AddElementToGroup(x + 2, 2);
+
+  EXPECT_EQ(ordering.MinNonZeroGroup(), 1);
+  ordering.Remove(x);
+
+  EXPECT_EQ(ordering.MinNonZeroGroup(), 1);
+  ordering.Remove(x + 1);
+
+  EXPECT_EQ(ordering.MinNonZeroGroup(), 2);
+  ordering.Remove(x + 2);
+
+  // No non-zero groups left.
+  EXPECT_DEATH_IF_SUPPORTED(ordering.MinNonZeroGroup(), "NumGroups");
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/pair_hash.h b/internal/ceres/pair_hash.h
new file mode 100644
index 0000000..80453ba
--- /dev/null
+++ b/internal/ceres/pair_hash.h
@@ -0,0 +1,112 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// A hasher for std::pair<T, T>.
+
+#ifndef CERES_INTERNAL_PAIR_HASH_H_
+#define CERES_INTERNAL_PAIR_HASH_H_
+
+#include "ceres/internal/port.h"
+#include <cstdint>
+#include <utility>
+
+namespace ceres {
+namespace internal {
+
+#if defined(_WIN32) && !defined(__MINGW64__) && !defined(__MINGW32__)
+#define GG_LONGLONG(x) x##I64
+#define GG_ULONGLONG(x) x##UI64
+#else
+#define GG_LONGLONG(x) x##LL
+#define GG_ULONGLONG(x) x##ULL
+#endif
+
+// The hash function is due to Bob Jenkins (see
+// http://burtleburtle.net/bob/hash/index.html). Each mix takes 36 instructions,
+// in 18 cycles if you're lucky; on x86 architectures it requires 45
+// instructions in 27 cycles, again if you're lucky.
+//
+// 32bit version
+inline void hash_mix(uint32_t& a, uint32_t& b, uint32_t& c) {
+  a -= b; a -= c; a ^= (c>>13);
+  b -= c; b -= a; b ^= (a<<8);
+  c -= a; c -= b; c ^= (b>>13);
+  a -= b; a -= c; a ^= (c>>12);
+  b -= c; b -= a; b ^= (a<<16);
+  c -= a; c -= b; c ^= (b>>5);
+  a -= b; a -= c; a ^= (c>>3);
+  b -= c; b -= a; b ^= (a<<10);
+  c -= a; c -= b; c ^= (b>>15);
+}
+
+// 64bit version
+inline void hash_mix(uint64_t& a, uint64_t& b, uint64_t& c) {
+  a -= b; a -= c; a ^= (c>>43);
+  b -= c; b -= a; b ^= (a<<9);
+  c -= a; c -= b; c ^= (b>>8);
+  a -= b; a -= c; a ^= (c>>38);
+  b -= c; b -= a; b ^= (a<<23);
+  c -= a; c -= b; c ^= (b>>5);
+  a -= b; a -= c; a ^= (c>>35);
+  b -= c; b -= a; b ^= (a<<49);
+  c -= a; c -= b; c ^= (b>>11);
+}
+
+inline uint32_t Hash32NumWithSeed(uint32_t num, uint32_t c) {
+  // The golden ratio; an arbitrary value.
+  uint32_t b = 0x9e3779b9UL;
+  hash_mix(num, b, c);
+  return c;
+}
+
+inline uint64_t Hash64NumWithSeed(uint64_t num, uint64_t c) {
+  // More of the golden ratio.
+  uint64_t b = GG_ULONGLONG(0xe08c1d668b756f82);
+  hash_mix(num, b, c);
+  return c;
+}
+
+// Hasher for STL pairs. Requires hashers for both members to be defined.
+struct pair_hash {
+ public:
+  template <typename T>
+  std::size_t operator()(const std::pair<T, T>& p) const {
+    const std::size_t h1 = std::hash<T>()(p.first);
+    const std::size_t h2 = std::hash<T>()(p.second);
+    // The branch below is resolved at compile time.
+    return (sizeof(h1) <= sizeof(uint32_t)) ? Hash32NumWithSeed(h1, h2)
+                                            : Hash64NumWithSeed(h1, h2);
+  }
+};
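+
+// Example usage (illustrative sketch; the map name and key values below are
+// hypothetical):
+//
+//   std::unordered_map<std::pair<int, int>, double, pair_hash> cell_values;
+//   cell_values[std::make_pair(0, 1)] = 2.5;
+//   // pair_hash hashes both members with std::hash and combines them via
+//   // Hash32NumWithSeed or Hash64NumWithSeed, depending on sizeof(size_t).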
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_PAIR_HASH_H_
diff --git a/internal/ceres/parallel_for.h b/internal/ceres/parallel_for.h
new file mode 100644
index 0000000..2da2320
--- /dev/null
+++ b/internal/ceres/parallel_for.h
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vitus@google.com (Michael Vitus)
+
+#ifndef CERES_INTERNAL_PARALLEL_FOR_H_
+#define CERES_INTERNAL_PARALLEL_FOR_H_
+
+#include <functional>
+
+#include "ceres/context_impl.h"
+
+namespace ceres {
+namespace internal {
+
+// Returns the maximum number of threads supported by the threading backend
+// Ceres was compiled with.
+int MaxNumThreadsAvailable();
+
+// Execute the function for every element in the range [start, end) with at most
+// num_threads. It will execute all the work on the calling thread if
+// num_threads is 1.
+void ParallelFor(ContextImpl* context,
+                 int start,
+                 int end,
+                 int num_threads,
+                 const std::function<void(int)>& function);
+
+// Execute the function for every element in the range [start, end) with at most
+// num_threads. It will execute all the work on the calling thread if
+// num_threads is 1.  Each invocation of function() will be passed a thread_id
+// in [0, num_threads) that is guaranteed to be distinct from the value passed
+// to any concurrent execution of function().
+void ParallelFor(ContextImpl* context,
+                 int start,
+                 int end,
+                 int num_threads,
+                 const std::function<void(int thread_id, int i)>& function);
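+
+// Example usage (illustrative sketch; `context` and `values` are hypothetical):
+//
+//   ContextImpl context;
+//   std::vector<double> values(100, 1.0);
+//   ParallelFor(&context, 0, 100, /*num_threads=*/4,
+//               [&values](int i) { values[i] *= 2.0; });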
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_PARALLEL_FOR_H_
diff --git a/internal/ceres/parallel_for_cxx.cc b/internal/ceres/parallel_for_cxx.cc
new file mode 100644
index 0000000..b6ef709
--- /dev/null
+++ b/internal/ceres/parallel_for_cxx.cc
@@ -0,0 +1,247 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vitus@google.com (Michael Vitus)
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifdef CERES_USE_CXX11_THREADS
+
+#include "ceres/parallel_for.h"
+
+#include <algorithm>
+#include <cmath>
+#include <condition_variable>
+#include <memory>
+#include <mutex>
+
+#include "ceres/concurrent_queue.h"
+#include "ceres/scoped_thread_token.h"
+#include "ceres/thread_token_provider.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+namespace {
+// This class creates a thread safe barrier which will block until a
+// pre-specified number of threads call Finished.  This allows us to block the
+// main thread until all the parallel threads are finished processing all the
+// work.
+class BlockUntilFinished {
+ public:
+  explicit BlockUntilFinished(int num_total)
+      : num_finished_(0), num_total_(num_total) {}
+
+  // Increment the number of jobs that have finished and signal the blocking
+  // thread if all jobs have finished.
+  void Finished() {
+    std::lock_guard<std::mutex> lock(mutex_);
+    ++num_finished_;
+    CHECK_LE(num_finished_, num_total_);
+    if (num_finished_ == num_total_) {
+      condition_.notify_one();
+    }
+  }
+
+  // Block until all threads have signaled they are finished.
+  void Block() {
+    std::unique_lock<std::mutex> lock(mutex_);
+    condition_.wait(lock, [&]() { return num_finished_ == num_total_; });
+  }
+
+ private:
+  std::mutex mutex_;
+  std::condition_variable condition_;
+  // The current number of jobs finished.
+  int num_finished_;
+  // The total number of jobs.
+  int num_total_;
+};
+
+// Shared state between the parallel tasks. Each thread will use this
+// information to get the next block of work to be performed.
+struct SharedState {
+  SharedState(int start, int end, int num_work_items)
+      : start(start),
+        end(end),
+        num_work_items(num_work_items),
+        i(0),
+        thread_token_provider(num_work_items),
+        block_until_finished(num_work_items) {}
+
+  // The start and end index of the for loop.
+  const int start;
+  const int end;
+  // The number of blocks that need to be processed.
+  const int num_work_items;
+
+  // The next block of work to be assigned to a worker.  The parallel for loop
+  // range is split into num_work_items blocks of work, i.e. a single block of
+  // work is:
+  //  for (int j = start + i; j < end; j += num_work_items) { ... }.
+  int i;
+  std::mutex mutex_i;
+
+  // Provides a unique thread ID among all active threads working on the same
+  // group of tasks.  Thread-safe.
+  ThreadTokenProvider thread_token_provider;
+
+  // Used to signal when all the work has been completed.  Thread safe.
+  BlockUntilFinished block_until_finished;
+};
+
+}  // namespace
+
+int MaxNumThreadsAvailable() {
+  return ThreadPool::MaxNumThreadsAvailable();
+}
+
+// See ParallelFor (below) for more details.
+void ParallelFor(ContextImpl* context,
+                 int start,
+                 int end,
+                 int num_threads,
+                 const std::function<void(int)>& function) {
+  CHECK_GT(num_threads, 0);
+  CHECK(context != NULL);
+  if (end <= start) {
+    return;
+  }
+
+  // Fast path for when it is single threaded.
+  if (num_threads == 1) {
+    for (int i = start; i < end; ++i) {
+      function(i);
+    }
+    return;
+  }
+
+  ParallelFor(context, start, end, num_threads,
+              [&function](int /*thread_id*/, int i) { function(i); });
+}
+
+// This implementation uses a fixed size max worker pool with a shared task
+// queue. The problem of executing the function for the interval of [start, end)
+// is broken up into at most num_threads blocks and added to the thread pool. To
+// avoid deadlocks, the calling thread is allowed to steal work from the worker
+// pool. This is implemented via a shared state between the tasks. In order for
+// the calling thread or thread pool to get a block of work, it will query the
+// shared state for the next block of work to be done. If there is nothing left,
+// it will return. We will exit the ParallelFor call when all of the work has
+// been done, not when all of the tasks have been popped off the task queue.
+//
+// A unique thread ID among all active tasks will be acquired once for each
+// block of work.  This avoids the significant performance penalty for acquiring
+// it on every iteration of the for loop. The thread ID is guaranteed to be in
+// [0, num_threads).
+//
+// A performance analysis has shown this implementation is on par with OpenMP
+// and TBB.
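+//
+// As a concrete illustration of the decomposition (numbers are hypothetical):
+// with start = 0, end = 10 and num_threads = 4, num_work_items is
+// min(10 - 0, 4) = 4, and work item i processes the interleaved indices
+// {i, i + 4, i + 8} that fall inside [0, 10), e.g. work item 1 processes
+// {1, 5, 9}.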
+void ParallelFor(ContextImpl* context,
+                 int start,
+                 int end,
+                 int num_threads,
+                 const std::function<void(int thread_id, int i)>& function) {
+  CHECK_GT(num_threads, 0);
+  CHECK(context != NULL);
+  if (end <= start) {
+    return;
+  }
+
+  // Fast path for when it is single threaded.
+  if (num_threads == 1) {
+    // Even though we only have one thread, use the thread token provider to
+    // guarantee the exact same behavior when running with multiple threads.
+    ThreadTokenProvider thread_token_provider(num_threads);
+    const ScopedThreadToken scoped_thread_token(&thread_token_provider);
+    const int thread_id = scoped_thread_token.token();
+    for (int i = start; i < end; ++i) {
+      function(thread_id, i);
+    }
+    return;
+  }
+
+  // We use a std::shared_ptr because the main thread can finish all
+  // the work before the tasks have been popped off the queue.  So the
+  // shared state needs to exist for the duration of all the tasks.
+  const int num_work_items = std::min((end - start), num_threads);
+  std::shared_ptr<SharedState> shared_state(
+      new SharedState(start, end, num_work_items));
+
+  // A function which tries to perform a chunk of work. This returns false if
+  // there is no work to be done.
+  auto task_function = [shared_state, &function]() {
+    int i = 0;
+    {
+      // Get the next available chunk of work to be performed. If there is no
+      // work, return false.
+      std::lock_guard<std::mutex> lock(shared_state->mutex_i);
+      if (shared_state->i >= shared_state->num_work_items) {
+        return false;
+      }
+      i = shared_state->i;
+      ++shared_state->i;
+    }
+
+    const ScopedThreadToken scoped_thread_token(
+        &shared_state->thread_token_provider);
+    const int thread_id = scoped_thread_token.token();
+
+    // Perform each task.
+    for (int j = shared_state->start + i;
+         j < shared_state->end;
+         j += shared_state->num_work_items) {
+      function(thread_id, j);
+    }
+    shared_state->block_until_finished.Finished();
+    return true;
+  };
+
+  // Add all the tasks to the thread pool.
+  for (int i = 0; i < num_work_items; ++i) {
+    // Note that task_function is captured by value, so the shared_state
+    // shared pointer is copied and its reference count is increased. This
+    // prevents it from being deleted when the main thread finishes all the
+    // work and exits before the worker threads finish.
+    context->thread_pool.AddTask([task_function]() { task_function(); });
+  }
+
+  // Try to do any available work on the main thread. This may steal work from
+  // the thread pool, but when there is no work left the thread pool tasks
+  // will be no-ops.
+  while (task_function()) {
+  }
+
+  // Wait until all tasks have finished.
+  shared_state->block_until_finished.Block();
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_USE_CXX11_THREADS
diff --git a/internal/ceres/parallel_for_nothreads.cc b/internal/ceres/parallel_for_nothreads.cc
new file mode 100644
index 0000000..e8f450a
--- /dev/null
+++ b/internal/ceres/parallel_for_nothreads.cc
@@ -0,0 +1,78 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: alexs.mac@gmail.com (Alex Stewart)
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifdef CERES_NO_THREADS
+
+#include "ceres/parallel_for.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+int MaxNumThreadsAvailable() { return 1; }
+
+void ParallelFor(ContextImpl* context,
+                 int start,
+                 int end,
+                 int num_threads,
+                 const std::function<void(int)>& function) {
+  CHECK_GT(num_threads, 0);
+  CHECK(context != NULL);
+  if (end <= start) {
+    return;
+  }
+  for (int i = start; i < end; ++i) {
+    function(i);
+  }
+}
+
+void ParallelFor(ContextImpl* context,
+                 int start,
+                 int end,
+                 int num_threads,
+                 const std::function<void(int thread_id, int i)>& function) {
+  CHECK_GT(num_threads, 0);
+  CHECK(context != NULL);
+  if (end <= start) {
+    return;
+  }
+  const int thread_id = 0;
+  for (int i = start; i < end; ++i) {
+    function(thread_id, i);
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_THREADS
diff --git a/internal/ceres/parallel_for_openmp.cc b/internal/ceres/parallel_for_openmp.cc
new file mode 100644
index 0000000..8afe3b1
--- /dev/null
+++ b/internal/ceres/parallel_for_openmp.cc
@@ -0,0 +1,88 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vitus@google.com (Michael Vitus)
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#if defined(CERES_USE_OPENMP)
+
+#include "ceres/parallel_for.h"
+
+#include "ceres/scoped_thread_token.h"
+#include "ceres/thread_token_provider.h"
+#include "glog/logging.h"
+#include "omp.h"
+
+namespace ceres {
+namespace internal {
+
+int MaxNumThreadsAvailable() {
+  return omp_get_max_threads();
+}
+
+void ParallelFor(ContextImpl* context,
+                 int start,
+                 int end,
+                 int num_threads,
+                 const std::function<void(int)>& function) {
+  CHECK_GT(num_threads, 0);
+  CHECK(context != NULL);
+  if (end <= start) {
+    return;
+  }
+
+#ifdef CERES_USE_OPENMP
+#pragma omp parallel for num_threads(num_threads) \
+    schedule(dynamic) if (num_threads > 1)
+#endif  // CERES_USE_OPENMP
+  for (int i = start; i < end; ++i) {
+    function(i);
+  }
+}
+
+void ParallelFor(ContextImpl* context,
+                 int start,
+                 int end,
+                 int num_threads,
+                 const std::function<void(int thread_id, int i)>& function) {
+  CHECK(context != NULL);
+
+  ThreadTokenProvider thread_token_provider(num_threads);
+  ParallelFor(context, start, end, num_threads, [&](int i) {
+    const ScopedThreadToken scoped_thread_token(&thread_token_provider);
+    const int thread_id = scoped_thread_token.token();
+    function(thread_id, i);
+  });
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // defined(CERES_USE_OPENMP)
diff --git a/internal/ceres/parallel_for_test.cc b/internal/ceres/parallel_for_test.cc
new file mode 100644
index 0000000..04e5783
--- /dev/null
+++ b/internal/ceres/parallel_for_test.cc
@@ -0,0 +1,163 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vitus@google.com (Michael Vitus)
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#include "ceres/parallel_for.h"
+
+#include <cmath>
+#include <condition_variable>
+#include <mutex>
+#include <thread>
+#include <vector>
+
+#include "ceres/context_impl.h"
+#include "glog/logging.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+using testing::ElementsAreArray;
+using testing::UnorderedElementsAreArray;
+
+// Tests that the parallel for loop computes the correct result for various
+// numbers of threads.
+TEST(ParallelFor, NumThreads) {
+  ContextImpl context;
+  context.EnsureMinimumThreads(/*num_threads=*/2);
+
+  const int size = 16;
+  std::vector<int> expected_results(size, 0);
+  for (int i = 0; i < size; ++i) {
+    expected_results[i] = std::sqrt(i);
+  }
+
+  for (int num_threads = 1; num_threads <= 8; ++num_threads) {
+    std::vector<int> values(size, 0);
+    ParallelFor(&context, 0, size, num_threads,
+                [&values](int i) { values[i] = std::sqrt(i); });
+    EXPECT_THAT(values, ElementsAreArray(expected_results));
+  }
+}
+
+// Tests that the parallel for loop with the thread ID interface computes the
+// correct result for various numbers of threads.
+TEST(ParallelForWithThreadId, NumThreads) {
+  ContextImpl context;
+  context.EnsureMinimumThreads(/*num_threads=*/2);
+
+  const int size = 16;
+  std::vector<int> expected_results(size, 0);
+  for (int i = 0; i < size; ++i) {
+    expected_results[i] = std::sqrt(i);
+  }
+
+  for (int num_threads = 1; num_threads <= 8; ++num_threads) {
+    std::vector<int> values(size, 0);
+    ParallelFor(&context, 0, size, num_threads,
+                [&values](int thread_id, int i) { values[i] = std::sqrt(i); });
+    EXPECT_THAT(values, ElementsAreArray(expected_results));
+  }
+}
+
+// Tests that nested parallel for loops do not result in a deadlock.
+TEST(ParallelFor, NestedParallelForDeadlock) {
+  ContextImpl context;
+  context.EnsureMinimumThreads(/*num_threads=*/2);
+
+  // Increment each element in the 2D matrix.
+  std::vector<std::vector<int>> x(3, {1, 2, 3});
+  ParallelFor(&context, 0, 3, 2, [&x, &context](int i) {
+    std::vector<int>& y = x.at(i);
+    ParallelFor(&context, 0, 3, 2, [&y](int j) { ++y.at(j); });
+  });
+
+  const std::vector<int> results = {2, 3, 4};
+  for (const std::vector<int>& value : x) {
+    EXPECT_THAT(value, ElementsAreArray(results));
+  }
+}
+
+// Tests that nested parallel for loops do not result in a deadlock for the
+// parallel for with thread ID interface.
+TEST(ParallelForWithThreadId, NestedParallelForDeadlock) {
+  ContextImpl context;
+  context.EnsureMinimumThreads(/*num_threads=*/2);
+
+  // Increment each element in the 2D matrix.
+  std::vector<std::vector<int>> x(3, {1, 2, 3});
+  ParallelFor(&context, 0, 3, 2, [&x, &context](int thread_id, int i) {
+    std::vector<int>& y = x.at(i);
+    ParallelFor(&context, 0, 3, 2, [&y](int thread_id, int j) { ++y.at(j); });
+  });
+
+  const std::vector<int> results = {2, 3, 4};
+  for (const std::vector<int>& value : x) {
+    EXPECT_THAT(value, ElementsAreArray(results));
+  }
+}
+
+// This test is only valid when multithreading support is enabled.
+#ifndef CERES_NO_THREADS
+TEST(ParallelForWithThreadId, UniqueThreadIds) {
+  // Ensure the hardware supports more than 1 thread to ensure the test will
+  // pass.
+  const int num_hardware_threads = std::thread::hardware_concurrency();
+  if (num_hardware_threads <= 1) {
+    LOG(ERROR)
+        << "Test not supported, the hardware does not support threading.";
+    return;
+  }
+
+  ContextImpl context;
+  context.EnsureMinimumThreads(/*num_threads=*/2);
+  // Record the thread ID assigned to each element.
+  std::vector<int> x(2, -1);
+  std::mutex mutex;
+  std::condition_variable condition;
+  int count = 0;
+  ParallelFor(&context, 0, 2, 2,
+              [&x, &mutex, &condition, &count](int thread_id, int i) {
+                std::unique_lock<std::mutex> lock(mutex);
+                x[i] = thread_id;
+                ++count;
+                condition.notify_all();
+                condition.wait(lock, [&]() { return count == 2; });
+              });
+
+  EXPECT_THAT(x, UnorderedElementsAreArray({0,1}));
+}
+#endif  // CERES_NO_THREADS
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/parallel_utils.cc b/internal/ceres/parallel_utils.cc
new file mode 100644
index 0000000..e1cb5f9
--- /dev/null
+++ b/internal/ceres/parallel_utils.cc
@@ -0,0 +1,90 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wjr@google.com (William Rucklidge)
+
+#include "ceres/parallel_utils.h"
+
+namespace ceres {
+namespace internal {
+
+void LinearIndexToUpperTriangularIndex(int k, int n, int* i, int* j) {
+  // This works by unfolding a rectangle into a triangle.
+  // Say n is even. 4 is a nice even number. The 10 i,j pairs that we
+  // want to produce are:
+  // 0,0 0,1 0,2 0,3
+  //     1,1 1,2 1,3
+  //         2,2 2,3
+  //             3,3
+  // This triangle can be folded into a 5x2 rectangle:
+  // 3,3 0,0 0,1 0,2 0,3
+  // 2,2 2,3 1,1 1,2 1,3
+
+  // If n is odd, say 5, then the 15 i,j pairs are:
+  // 0,0 0,1 0,2 0,3 0,4
+  //     1,1 1,2 1,3 1,4
+  //         2,2 2,3 2,4
+  //             3,3 3,4
+  //                 4,4
+  // which folds to a 5x3 rectangle:
+  // 0,0 0,1 0,2 0,3 0,4
+  // 4,4 1,1 1,2 1,3 1,4
+  // 3,3 3,4 2,2 2,3 2,4
+
+  // All this function does is map the linear iteration position to a
+  // location in the rectangle and work out the appropriate (i, j) for that
+  // location.
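+  //
+  // For example (even case), with n = 4 the rectangle is w = 5 columns wide;
+  // k = 7 gives i0 = 7 / 5 = 1 and j0 = 7 % 5 = 2, and since j0 > i0 the
+  // result is (i, j) = (1, 1), matching row 1, column 2 of the folded
+  // rectangle above.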
+  if (n & 1) {
+    // Odd n. The tip of the triangle is on row 1.
+    int w = n;  // Width of the rectangle to unfold
+    int i0 = k / w;
+    int j0 = k % w;
+    if (j0 >= i0) {
+      *i = i0;
+      *j = j0;
+    } else {
+      *i = n - i0;
+      *j = *i + j0;
+    }
+  } else {
+    // Even n. The tip of the triangle is on row 0, making it one wider.
+    int w = n + 1;
+    int i0 = k / w;
+    int j0 = k % w;
+    if (j0 > i0) {
+      *i = i0;
+      *j = j0 - 1;
+    } else {
+      *i = n - 1 - i0;
+      *j = *i + j0;
+    }
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/parallel_utils.h b/internal/ceres/parallel_utils.h
new file mode 100644
index 0000000..1291428
--- /dev/null
+++ b/internal/ceres/parallel_utils.h
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wjr@google.com (William Rucklidge)
+
+#ifndef CERES_INTERNAL_PARALLEL_UTILS_H_
+#define CERES_INTERNAL_PARALLEL_UTILS_H_
+
+namespace ceres {
+namespace internal {
+
+// Converts a linear iteration order into a triangular iteration order.
+// Suppose you have nested loops that look like
+// for (int i = 0; i < n; i++) {
+//   for (int j = i; j < n; j++) {
+//     ... use i and j
+//   }
+// }
+// Naively using ParallelFor to parallelise those loops might look like
+// ParallelFor(..., 0, n * n, num_threads,
+//   [](int thread_id, int k) {
+//     int i = k / n, j = k % n;
+//     if (j < i) return;
+//     ...
+//    });
+// but these empty work items can lead to very unbalanced threading. Instead,
+// do this:
+// int actual_work_items = (n * (n + 1)) / 2;
+// ParallelFor(..., 0, actual_work_items, num_threads,
+//   [](int thread_id, int k) {
+//     int i, j;
+//     LinearIndexToUpperTriangularIndex(k, n, &i, &j);
+//     ...
+//    });
+// which in each iteration will produce i and j satisfying
+// 0 <= i <= j < n
+void LinearIndexToUpperTriangularIndex(int k, int n, int* i, int* j);
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_PARALLEL_UTILS_H_
diff --git a/internal/ceres/parallel_utils_test.cc b/internal/ceres/parallel_utils_test.cc
new file mode 100644
index 0000000..f997d25
--- /dev/null
+++ b/internal/ceres/parallel_utils_test.cc
@@ -0,0 +1,61 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wjr@google.com (William Rucklidge)
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+#include "ceres/parallel_utils.h"
+
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+// Tests that unfolding linear iterations to triangular iterations produces
+// indices that are in-range and unique.
+TEST(LinearIndexToUpperTriangularIndexTest, UniqueAndValid) {
+  for (int n = 0; n < 100; n++) {
+    std::set<std::pair<int, int>> seen_pairs;
+    int actual_work_items = (n * (n + 1)) / 2;
+    for (int k = 0; k < actual_work_items; k++) {
+      int i, j;
+      LinearIndexToUpperTriangularIndex(k, n, &i, &j);
+      EXPECT_GE(i, 0);
+      EXPECT_LT(i, n);
+      EXPECT_GE(j, i);
+      EXPECT_LT(j, n);
+      seen_pairs.insert(std::make_pair(i, j));
+    }
+    EXPECT_EQ(actual_work_items, seen_pairs.size());
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/parameter_block.h b/internal/ceres/parameter_block.h
new file mode 100644
index 0000000..7f2a911
--- /dev/null
+++ b/internal/ceres/parameter_block.h
@@ -0,0 +1,406 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#ifndef CERES_INTERNAL_PARAMETER_BLOCK_H_
+#define CERES_INTERNAL_PARAMETER_BLOCK_H_
+
+#include <algorithm>
+#include <cstdint>
+#include <cstdlib>
+#include <limits>
+#include <memory>
+#include <string>
+#include <unordered_set>
+#include "ceres/array_utils.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/port.h"
+#include "ceres/local_parameterization.h"
+#include "ceres/stringprintf.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+class ProblemImpl;
+class ResidualBlock;
+
+// The parameter block encodes the location of the user's original value, and
+// also the "current state" of the parameter. The evaluator uses whatever is in
+// the current state of the parameter when evaluating. This is inlined since the
+// methods are performance sensitive.
+//
+// The class is not thread-safe, unless only const methods are called. The
+// parameter block may also hold a pointer to a local parameterization; the
+// parameter block does not take ownership of this pointer, so the user is
+// responsible for the proper disposal of the local parameterization.
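+//
+// Example (illustrative sketch; `values` is a hypothetical user array):
+//
+//   double values[3] = {1.0, 2.0, 3.0};
+//   ParameterBlock block(values, 3, /*index=*/0);
+//   block.SetConstant();      // Hold the block fixed during optimization.
+//   CHECK(block.IsConstant());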
+class ParameterBlock {
+ public:
+  typedef std::unordered_set<ResidualBlock*> ResidualBlockSet;
+
+  // Create a parameter block with the user state, size, and index specified.
+  // The size is the size of the parameter block and the index is the position
+  // of the parameter block inside a Program (if any).
+  ParameterBlock(double* user_state, int size, int index)
+      : user_state_(user_state),
+        state_(user_state),
+        size_(size),
+        index_(index) {}
+
+  ParameterBlock(double* user_state,
+                 int size,
+                 int index,
+                 LocalParameterization* local_parameterization)
+      : user_state_(user_state),
+        state_(user_state),
+        size_(size),
+        index_(index) {
+    if (local_parameterization != nullptr) {
+      SetParameterization(local_parameterization);
+    }
+  }
+
+  // The size of the parameter block.
+  int Size() const { return size_; }
+
+  // Manipulate the parameter state.
+  bool SetState(const double* x) {
+    CHECK(x != nullptr) << "Tried to set the state of the parameter block "
+                        << "with user location " << user_state_
+                        << " to a nullptr.";
+    CHECK(!IsConstant()) << "Tried to set the state of constant parameter "
+                         << "with user location " << user_state_;
+
+    state_ = x;
+    return UpdateLocalParameterizationJacobian();
+  }
+
+  // Copy the current parameter state out to x. This is "GetState()" rather than
+  // simply "state()" since it is actively copying the data into the passed
+  // pointer.
+  void GetState(double* x) const {
+    if (x != state_) {
+      std::copy(state_, state_ + size_, x);
+    }
+  }
+
+  // Direct pointers to the current state.
+  const double* state() const { return state_; }
+  const double* user_state() const { return user_state_; }
+  double* mutable_user_state() { return user_state_; }
+  const LocalParameterization* local_parameterization() const {
+    return local_parameterization_;
+  }
+  LocalParameterization* mutable_local_parameterization() {
+    return local_parameterization_;
+  }
+
+  // Set this parameter block to vary or not.
+  void SetConstant() { is_set_constant_ = true; }
+  void SetVarying() { is_set_constant_ = false; }
+  bool IsSetConstantByUser() const { return is_set_constant_; }
+  bool IsConstant() const { return (is_set_constant_ || LocalSize() == 0); }
+
+  double UpperBound(int index) const {
+    return (upper_bounds_ ? upper_bounds_[index]
+                          : std::numeric_limits<double>::max());
+  }
+
+  double LowerBound(int index) const {
+    return (lower_bounds_ ? lower_bounds_[index]
+                          : -std::numeric_limits<double>::max());
+  }
+
+  bool IsUpperBounded() const { return (upper_bounds_ != nullptr); }
+  bool IsLowerBounded() const { return (lower_bounds_ != nullptr); }
+
+  // This parameter block's index in an array.
+  int index() const { return index_; }
+  void set_index(int index) { index_ = index; }
+
+  // This parameter offset inside a larger state vector.
+  int state_offset() const { return state_offset_; }
+  void set_state_offset(int state_offset) { state_offset_ = state_offset; }
+
+  // This parameter offset inside a larger delta vector.
+  int delta_offset() const { return delta_offset_; }
+  void set_delta_offset(int delta_offset) { delta_offset_ = delta_offset; }
+
+  // Methods relating to the parameter block's parameterization.
+
+  // The local-to-global Jacobian. Returns nullptr if there is no local
+  // parameterization for this parameter block. The returned matrix is
+  // row-major and has Size() rows and LocalSize() columns.
+  const double* LocalParameterizationJacobian() const {
+    return local_parameterization_jacobian_.get();
+  }
+
+  int LocalSize() const {
+    return (local_parameterization_ == nullptr)
+               ? size_
+               : local_parameterization_->LocalSize();
+  }
+
+  // Set the parameterization. The parameterization can be set exactly once;
+  // multiple calls to set the parameterization to different values will crash.
+  // It is an error to pass nullptr for the parameterization. The parameter
+  // block does not take ownership of the parameterization.
+  void SetParameterization(LocalParameterization* new_parameterization) {
+    CHECK(new_parameterization != nullptr)
+        << "nullptr parameterization invalid.";
+    // Nothing to do if the new parameterization is the same as the
+    // old parameterization.
+    if (new_parameterization == local_parameterization_) {
+      return;
+    }
+
+    CHECK(local_parameterization_ == nullptr)
+        << "Can't re-set the local parameterization; it leads to "
+        << "ambiguous ownership. Current local parameterization is: "
+        << local_parameterization_;
+
+    CHECK(new_parameterization->GlobalSize() == size_)
+        << "Invalid parameterization for parameter block. The parameter block "
+        << "has size " << size_ << " while the parameterization has a global "
+        << "size of " << new_parameterization->GlobalSize() << ". Did you "
+        << "accidentally use the wrong parameter block or parameterization?";
+
+    CHECK_GT(new_parameterization->LocalSize(), 0)
+        << "Invalid parameterization. Parameterizations must have a "
+        << "positive dimensional tangent space.";
+
+    local_parameterization_ = new_parameterization;
+    local_parameterization_jacobian_.reset(
+        new double[local_parameterization_->GlobalSize() *
+                   local_parameterization_->LocalSize()]);
+    CHECK(UpdateLocalParameterizationJacobian())
+        << "Local parameterization Jacobian computation failed for x: "
+        << ConstVectorRef(state_, Size()).transpose();
+  }
+
+  void SetUpperBound(int index, double upper_bound) {
+    CHECK_LT(index, size_);
+
+    if (upper_bound >= std::numeric_limits<double>::max() && !upper_bounds_) {
+      return;
+    }
+
+    if (!upper_bounds_) {
+      upper_bounds_.reset(new double[size_]);
+      std::fill(upper_bounds_.get(),
+                upper_bounds_.get() + size_,
+                std::numeric_limits<double>::max());
+    }
+
+    upper_bounds_[index] = upper_bound;
+  }
+
+  void SetLowerBound(int index, double lower_bound) {
+    CHECK_LT(index, size_);
+
+    if (lower_bound <= -std::numeric_limits<double>::max() && !lower_bounds_) {
+      return;
+    }
+
+    if (!lower_bounds_) {
+      lower_bounds_.reset(new double[size_]);
+      std::fill(lower_bounds_.get(),
+                lower_bounds_.get() + size_,
+                -std::numeric_limits<double>::max());
+    }
+
+    lower_bounds_[index] = lower_bound;
+  }
+
+  // Generalization of the addition operation. This is the same as
+  // LocalParameterization::Plus() followed by projection onto the
+  // hyper cube implied by the bounds constraints.
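+  //
+  // For example (hypothetical values), if x[i] = 2.0, delta[i] = 5.0 and
+  // SetUpperBound(i, 4.0) was called, then x_plus_delta[i] is clamped from
+  // 7.0 down to 4.0.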
+  bool Plus(const double* x, const double* delta, double* x_plus_delta) {
+    if (local_parameterization_ != nullptr) {
+      if (!local_parameterization_->Plus(x, delta, x_plus_delta)) {
+        return false;
+      }
+    } else {
+      VectorRef(x_plus_delta, size_) =
+          ConstVectorRef(x, size_) + ConstVectorRef(delta, size_);
+    }
+
+    // Project onto the box constraints.
+    if (lower_bounds_.get() != nullptr) {
+      for (int i = 0; i < size_; ++i) {
+        x_plus_delta[i] = std::max(x_plus_delta[i], lower_bounds_[i]);
+      }
+    }
+
+    if (upper_bounds_.get() != nullptr) {
+      for (int i = 0; i < size_; ++i) {
+        x_plus_delta[i] = std::min(x_plus_delta[i], upper_bounds_[i]);
+      }
+    }
+
+    return true;
+  }
+
+  std::string ToString() const {
+    return StringPrintf(
+        "{ this=%p, user_state=%p, state=%p, size=%d, "
+        "constant=%d, index=%d, state_offset=%d, "
+        "delta_offset=%d }",
+        this,
+        user_state_,
+        state_,
+        size_,
+        is_set_constant_,
+        index_,
+        state_offset_,
+        delta_offset_);
+  }
+
+  void EnableResidualBlockDependencies() {
+    CHECK(residual_blocks_.get() == nullptr)
+        << "Ceres bug: There is already a residual block collection "
+        << "for parameter block: " << ToString();
+    residual_blocks_.reset(new ResidualBlockSet);
+  }
+
+  void AddResidualBlock(ResidualBlock* residual_block) {
+    CHECK(residual_blocks_.get() != nullptr)
+        << "Ceres bug: The residual block collection is null for parameter "
+        << "block: " << ToString();
+    residual_blocks_->insert(residual_block);
+  }
+
+  void RemoveResidualBlock(ResidualBlock* residual_block) {
+    CHECK(residual_blocks_.get() != nullptr)
+        << "Ceres bug: The residual block collection is null for parameter "
+        << "block: " << ToString();
+    CHECK(residual_blocks_->find(residual_block) != residual_blocks_->end())
+        << "Ceres bug: Missing residual for parameter block: " << ToString();
+    residual_blocks_->erase(residual_block);
+  }
+
+  // This is only intended for iterating; perhaps this should only expose
+  // .begin() and .end().
+  ResidualBlockSet* mutable_residual_blocks() { return residual_blocks_.get(); }
+
+  double LowerBoundForParameter(int index) const {
+    if (lower_bounds_.get() == nullptr) {
+      return -std::numeric_limits<double>::max();
+    } else {
+      return lower_bounds_[index];
+    }
+  }
+
+  double UpperBoundForParameter(int index) const {
+    if (upper_bounds_.get() == nullptr) {
+      return std::numeric_limits<double>::max();
+    } else {
+      return upper_bounds_[index];
+    }
+  }
+
+ private:
+  bool UpdateLocalParameterizationJacobian() {
+    if (local_parameterization_ == nullptr) {
+      return true;
+    }
+
+    // Update the local to global Jacobian. In some cases this is
+    // wasted effort; if this is a bottleneck, we will find a solution
+    // at that time.
+
+    const int jacobian_size = Size() * LocalSize();
+    InvalidateArray(jacobian_size, local_parameterization_jacobian_.get());
+    if (!local_parameterization_->ComputeJacobian(
+            state_, local_parameterization_jacobian_.get())) {
+      LOG(WARNING) << "Local parameterization Jacobian computation failed"
+                      "for x: "
+                   << ConstVectorRef(state_, Size()).transpose();
+      return false;
+    }
+
+    if (!IsArrayValid(jacobian_size, local_parameterization_jacobian_.get())) {
+      LOG(WARNING) << "Local parameterization Jacobian computation returned"
+                   << "an invalid matrix for x: "
+                   << ConstVectorRef(state_, Size()).transpose()
+                   << "\n Jacobian matrix : "
+                   << ConstMatrixRef(local_parameterization_jacobian_.get(),
+                                     Size(),
+                                     LocalSize());
+      return false;
+    }
+    return true;
+  }
+
+  double* user_state_ = nullptr;
+  int size_ = -1;
+  bool is_set_constant_ = false;
+  LocalParameterization* local_parameterization_ = nullptr;
+
+  // The "state" of the parameter. These fields are only needed while the
+  // solver is running. While at first glance using mutable is a bad idea, this
+  // ends up simplifying the internals of Ceres enough to justify the potential
+  // pitfalls of using "mutable."
+  mutable const double* state_ = nullptr;
+  mutable std::unique_ptr<double[]> local_parameterization_jacobian_;
+
+  // The index of the parameter. This is used by various other parts of Ceres to
+  // permit switching from a ParameterBlock* to an index in another array.
+  int32_t index_ = -1;
+
+  // The offset of this parameter block inside a larger state vector.
+  int32_t state_offset_ = -1;
+
+  // The offset of this parameter block inside a larger delta vector.
+  int32_t delta_offset_ = -1;
+
+  // If non-null, contains the residual blocks this parameter block is in.
+  std::unique_ptr<ResidualBlockSet> residual_blocks_;
+
+  // Upper and lower bounds for the parameter block.  SetUpperBound
+  // and SetLowerBound lazily initialize the upper_bounds_ and
+  // lower_bounds_ arrays. If they are never called, then memory for
+  // these arrays is never allocated. Thus, for problems where there
+  // are no bounds, or only one-sided bounds, we do not pay the cost of
+  // allocating memory for the inactive bounds constraints.
+  //
+  // When allocated, these arrays are initialized to
+  // std::numeric_limits<double>::max() and
+  // -std::numeric_limits<double>::max() respectively, which corresponds
+  // to the parameter block being unconstrained.
+  std::unique_ptr<double[]> upper_bounds_;
+  std::unique_ptr<double[]> lower_bounds_;
+
+  // Necessary so ProblemImpl can clean up the parameterizations.
+  friend class ProblemImpl;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_PARAMETER_BLOCK_H_
diff --git a/internal/ceres/parameter_block_ordering.cc b/internal/ceres/parameter_block_ordering.cc
new file mode 100644
index 0000000..ef521c0
--- /dev/null
+++ b/internal/ceres/parameter_block_ordering.cc
@@ -0,0 +1,178 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/parameter_block_ordering.h"
+
+#include <memory>
+#include <unordered_set>
+
+#include "ceres/graph.h"
+#include "ceres/graph_algorithms.h"
+#include "ceres/map_util.h"
+#include "ceres/parameter_block.h"
+#include "ceres/program.h"
+#include "ceres/residual_block.h"
+#include "ceres/wall_time.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+using std::map;
+using std::set;
+using std::vector;
+
+int ComputeStableSchurOrdering(const Program& program,
+                               vector<ParameterBlock*>* ordering) {
+  CHECK(ordering != nullptr);
+  ordering->clear();
+  EventLogger event_logger("ComputeStableSchurOrdering");
+  std::unique_ptr<Graph<ParameterBlock*>> graph(CreateHessianGraph(program));
+  event_logger.AddEvent("CreateHessianGraph");
+
+  const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
+  const std::unordered_set<ParameterBlock*>& vertices = graph->vertices();
+  for (int i = 0; i < parameter_blocks.size(); ++i) {
+    if (vertices.count(parameter_blocks[i]) > 0) {
+      ordering->push_back(parameter_blocks[i]);
+    }
+  }
+  event_logger.AddEvent("Preordering");
+
+  int independent_set_size = StableIndependentSetOrdering(*graph, ordering);
+  event_logger.AddEvent("StableIndependentSet");
+
+  // Add the excluded blocks to the back of the ordering vector.
+  for (int i = 0; i < parameter_blocks.size(); ++i) {
+    ParameterBlock* parameter_block = parameter_blocks[i];
+    if (parameter_block->IsConstant()) {
+      ordering->push_back(parameter_block);
+    }
+  }
+  event_logger.AddEvent("ConstantParameterBlocks");
+
+  return independent_set_size;
+}
+
+int ComputeSchurOrdering(const Program& program,
+                         vector<ParameterBlock*>* ordering) {
+  CHECK(ordering != nullptr);
+  ordering->clear();
+
+  std::unique_ptr<Graph<ParameterBlock*>> graph(CreateHessianGraph(program));
+  int independent_set_size = IndependentSetOrdering(*graph, ordering);
+  const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
+
+  // Add the excluded blocks to the back of the ordering vector.
+  for (int i = 0; i < parameter_blocks.size(); ++i) {
+    ParameterBlock* parameter_block = parameter_blocks[i];
+    if (parameter_block->IsConstant()) {
+      ordering->push_back(parameter_block);
+    }
+  }
+
+  return independent_set_size;
+}
+
+void ComputeRecursiveIndependentSetOrdering(const Program& program,
+                                            ParameterBlockOrdering* ordering) {
+  CHECK(ordering != nullptr);
+  ordering->Clear();
+  const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
+  std::unique_ptr<Graph<ParameterBlock*>> graph(CreateHessianGraph(program));
+
+  int num_covered = 0;
+  int round = 0;
+  while (num_covered < parameter_blocks.size()) {
+    vector<ParameterBlock*> independent_set_ordering;
+    const int independent_set_size =
+        IndependentSetOrdering(*graph, &independent_set_ordering);
+    for (int i = 0; i < independent_set_size; ++i) {
+      ParameterBlock* parameter_block = independent_set_ordering[i];
+      ordering->AddElementToGroup(parameter_block->mutable_user_state(), round);
+      graph->RemoveVertex(parameter_block);
+    }
+    num_covered += independent_set_size;
+    ++round;
+  }
+}
+
+Graph<ParameterBlock*>* CreateHessianGraph(const Program& program) {
+  Graph<ParameterBlock*>* graph = new Graph<ParameterBlock*>;
+  CHECK(graph != nullptr);
+  const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
+  for (int i = 0; i < parameter_blocks.size(); ++i) {
+    ParameterBlock* parameter_block = parameter_blocks[i];
+    if (!parameter_block->IsConstant()) {
+      graph->AddVertex(parameter_block);
+    }
+  }
+
+  const vector<ResidualBlock*>& residual_blocks = program.residual_blocks();
+  for (int i = 0; i < residual_blocks.size(); ++i) {
+    const ResidualBlock* residual_block = residual_blocks[i];
+    const int num_parameter_blocks = residual_block->NumParameterBlocks();
+    ParameterBlock* const* parameter_blocks =
+        residual_block->parameter_blocks();
+    for (int j = 0; j < num_parameter_blocks; ++j) {
+      if (parameter_blocks[j]->IsConstant()) {
+        continue;
+      }
+
+      for (int k = j + 1; k < num_parameter_blocks; ++k) {
+        if (parameter_blocks[k]->IsConstant()) {
+          continue;
+        }
+
+        graph->AddEdge(parameter_blocks[j], parameter_blocks[k]);
+      }
+    }
+  }
+
+  return graph;
+}
+
+void OrderingToGroupSizes(const ParameterBlockOrdering* ordering,
+                          vector<int>* group_sizes) {
+  CHECK(group_sizes != nullptr);
+  group_sizes->clear();
+  if (ordering == nullptr) {
+    return;
+  }
+
+  const map<int, set<double*>>& group_to_elements =
+      ordering->group_to_elements();
+  for (const auto& g_t_e : group_to_elements) {
+    group_sizes->push_back(g_t_e.second.size());
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/parameter_block_ordering.h b/internal/ceres/parameter_block_ordering.h
new file mode 100644
index 0000000..f996929
--- /dev/null
+++ b/internal/ceres/parameter_block_ordering.h
@@ -0,0 +1,89 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_PARAMETER_BLOCK_ORDERING_H_
+#define CERES_INTERNAL_PARAMETER_BLOCK_ORDERING_H_
+
+#include <vector>
+#include "ceres/ordered_groups.h"
+#include "ceres/graph.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+class Program;
+class ParameterBlock;
+
+// Uses an approximate independent set ordering to order the parameter
+// blocks of a problem so that it is suitable for use with Schur
+// complement based solvers. The output variable ordering contains an
+// ordering of the parameter blocks, and the return value is the size
+// of the independent set, i.e. the number of e_blocks (see
+// schur_complement_solver.h for an explanation). Constant parameters
+// are added to the end.
+//
+// The ordering vector has the structure
+//
+// ordering = [independent set,
+//             complement of the independent set,
+//             fixed blocks]
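+//
+// Illustrative sketch (a hypothetical problem, not from an actual
+// run): for a bundle adjustment style program with point blocks
+// {p0, p1} and camera blocks {c0, c1}, where each residual block
+// couples one point with one camera, no two points co-occur in a
+// residual block, so a possible result is
+//
+//   ordering = [p0, p1, c0, c1]  with a return value of 2,
+//
+// i.e. the points form the independent set (the e_blocks) and the
+// cameras form its complement.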
+int ComputeSchurOrdering(const Program& program,
+                         std::vector<ParameterBlock*>* ordering);
+
+// Same as above, except that ties while computing the independent set
+// ordering are resolved in favour of the order in which the parameter
+// blocks occur in the program.
+int ComputeStableSchurOrdering(const Program& program,
+                               std::vector<ParameterBlock*>* ordering);
+
+// Use an approximate independent set ordering to decompose the
+// parameter blocks of a problem into a sequence of independent
+// sets. The ordering covers all the non-constant parameter blocks in
+// the program.
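+//
+// Illustrative sketch (hypothetical problem): if the Hessian graph is
+// the chain x -- y -- z, the first round places the independent set
+// {x, z} into group 0 and removes it from the graph; the next round
+// places the remaining block {y} into group 1.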
+void ComputeRecursiveIndependentSetOrdering(const Program& program,
+                                            ParameterBlockOrdering* ordering);
+
+// Builds a graph on the parameter blocks of a Problem, whose
+// structure reflects the sparsity structure of the Hessian. Each
+// vertex corresponds to a parameter block in the Problem except for
+// parameter blocks that are marked constant. An edge connects two
+// parameter blocks if they co-occur in a residual block.
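+//
+// Illustrative sketch (hypothetical problem): given residual blocks
+// r0(x, z) and r1(z, y), with x, y and z all variable, the graph has
+// vertices {x, y, z} and edges {(x, z), (z, y)}; there is no edge
+// (x, y) because x and y never co-occur in a residual block.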
+Graph<ParameterBlock*>* CreateHessianGraph(const Program& program);
+
+// Iterate over each of the groups in order of their priority and fill
+// group_sizes with their sizes.
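+//
+// For example (illustrative): an ordering with group 0 = {x, y} and
+// group 1 = {z} yields group_sizes = [2, 1].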
+void OrderingToGroupSizes(const ParameterBlockOrdering* ordering,
+                          std::vector<int>* group_sizes);
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_PARAMETER_BLOCK_ORDERING_H_
diff --git a/internal/ceres/parameter_block_ordering_test.cc b/internal/ceres/parameter_block_ordering_test.cc
new file mode 100644
index 0000000..ba61be6
--- /dev/null
+++ b/internal/ceres/parameter_block_ordering_test.cc
@@ -0,0 +1,180 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/parameter_block_ordering.h"
+
+#include <cstddef>
+#include <memory>
+#include <unordered_set>
+#include <vector>
+
+#include "ceres/cost_function.h"
+#include "ceres/graph.h"
+#include "ceres/problem_impl.h"
+#include "ceres/program.h"
+#include "ceres/sized_cost_function.h"
+#include "ceres/stl_util.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+using std::vector;
+
+typedef Graph<ParameterBlock*> HessianGraph;
+typedef std::unordered_set<ParameterBlock*> VertexSet;
+
+template <int M, int... Ns>
+class DummyCostFunction : public SizedCostFunction<M, Ns...> {
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** jacobians) const {
+    return true;
+  }
+};
+
+class SchurOrderingTest : public ::testing::Test {
+ protected:
+  virtual void SetUp() {
+    // The explicit calls to AddParameterBlock are necessary because
+    // the below tests depend on the specific numbering of the
+    // parameter blocks.
+    problem_.AddParameterBlock(x_, 3);
+    problem_.AddParameterBlock(y_, 4);
+    problem_.AddParameterBlock(z_, 5);
+    problem_.AddParameterBlock(w_, 6);
+
+    problem_.AddResidualBlock(new DummyCostFunction<2, 3>, NULL, x_);
+    problem_.AddResidualBlock(new DummyCostFunction<6, 5, 4>, NULL, z_, y_);
+    problem_.AddResidualBlock(new DummyCostFunction<3, 3, 5>, NULL, x_, z_);
+    problem_.AddResidualBlock(new DummyCostFunction<7, 5, 3>, NULL, z_, x_);
+    problem_.AddResidualBlock(new DummyCostFunction<1, 5, 3, 6>, NULL,
+                              z_, x_, w_);
+  }
+
+  ProblemImpl problem_;
+  double x_[3], y_[4], z_[5], w_[6];
+};
+
+TEST_F(SchurOrderingTest, NoFixed) {
+  const Program& program = problem_.program();
+  const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
+  std::unique_ptr<HessianGraph> graph(CreateHessianGraph(program));
+
+  const VertexSet& vertices = graph->vertices();
+  EXPECT_EQ(vertices.size(), 4);
+
+  for (int i = 0; i < 4; ++i) {
+    EXPECT_TRUE(vertices.find(parameter_blocks[i]) != vertices.end());
+  }
+
+  {
+    const VertexSet& neighbors = graph->Neighbors(parameter_blocks[0]);
+    EXPECT_EQ(neighbors.size(), 2);
+    EXPECT_TRUE(neighbors.find(parameter_blocks[2]) != neighbors.end());
+    EXPECT_TRUE(neighbors.find(parameter_blocks[3]) != neighbors.end());
+  }
+
+  {
+    const VertexSet& neighbors = graph->Neighbors(parameter_blocks[1]);
+    EXPECT_EQ(neighbors.size(), 1);
+    EXPECT_TRUE(neighbors.find(parameter_blocks[2]) != neighbors.end());
+  }
+
+  {
+    const VertexSet& neighbors = graph->Neighbors(parameter_blocks[2]);
+    EXPECT_EQ(neighbors.size(), 3);
+    EXPECT_TRUE(neighbors.find(parameter_blocks[0]) != neighbors.end());
+    EXPECT_TRUE(neighbors.find(parameter_blocks[1]) != neighbors.end());
+    EXPECT_TRUE(neighbors.find(parameter_blocks[3]) != neighbors.end());
+  }
+
+  {
+    const VertexSet& neighbors = graph->Neighbors(parameter_blocks[3]);
+    EXPECT_EQ(neighbors.size(), 2);
+    EXPECT_TRUE(neighbors.find(parameter_blocks[0]) != neighbors.end());
+    EXPECT_TRUE(neighbors.find(parameter_blocks[2]) != neighbors.end());
+  }
+}
+
+TEST_F(SchurOrderingTest, AllFixed) {
+  problem_.SetParameterBlockConstant(x_);
+  problem_.SetParameterBlockConstant(y_);
+  problem_.SetParameterBlockConstant(z_);
+  problem_.SetParameterBlockConstant(w_);
+
+  const Program& program = problem_.program();
+  std::unique_ptr<HessianGraph> graph(CreateHessianGraph(program));
+  EXPECT_EQ(graph->vertices().size(), 0);
+}
+
+TEST_F(SchurOrderingTest, OneFixed) {
+  problem_.SetParameterBlockConstant(x_);
+
+  const Program& program = problem_.program();
+  const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
+  std::unique_ptr<HessianGraph> graph(CreateHessianGraph(program));
+
+  const VertexSet& vertices = graph->vertices();
+
+  EXPECT_EQ(vertices.size(), 3);
+  EXPECT_TRUE(vertices.find(parameter_blocks[0]) == vertices.end());
+
+  for (int i = 1; i < 3; ++i) {
+    EXPECT_TRUE(vertices.find(parameter_blocks[i]) != vertices.end());
+  }
+
+  {
+    const VertexSet& neighbors = graph->Neighbors(parameter_blocks[1]);
+    EXPECT_EQ(neighbors.size(), 1);
+    EXPECT_TRUE(neighbors.find(parameter_blocks[2]) != neighbors.end());
+  }
+
+  {
+    const VertexSet& neighbors = graph->Neighbors(parameter_blocks[2]);
+    EXPECT_EQ(neighbors.size(), 2);
+    EXPECT_TRUE(neighbors.find(parameter_blocks[1]) != neighbors.end());
+    EXPECT_TRUE(neighbors.find(parameter_blocks[3]) != neighbors.end());
+  }
+
+  {
+    const VertexSet& neighbors = graph->Neighbors(parameter_blocks[3]);
+    EXPECT_EQ(neighbors.size(), 1);
+    EXPECT_TRUE(neighbors.find(parameter_blocks[2]) != neighbors.end());
+  }
+
+  // The constant parameter block is at the end.
+  vector<ParameterBlock*> ordering;
+  ComputeSchurOrdering(program, &ordering);
+  EXPECT_EQ(ordering.back(), parameter_blocks[0]);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/parameter_block_test.cc b/internal/ceres/parameter_block_test.cc
new file mode 100644
index 0000000..e33db48
--- /dev/null
+++ b/internal/ceres/parameter_block_test.cc
@@ -0,0 +1,252 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include "ceres/parameter_block.h"
+
+#include "gtest/gtest.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+TEST(ParameterBlock, SetLocalParameterizationDiesOnSizeMismatch) {
+  double x[3] = {1.0, 2.0, 3.0};
+  ParameterBlock parameter_block(x, 3, -1);
+  std::vector<int> indices;
+  indices.push_back(1);
+  SubsetParameterization subset_wrong_size(4, indices);
+  EXPECT_DEATH_IF_SUPPORTED(
+      parameter_block.SetParameterization(&subset_wrong_size), "global");
+}
+
+TEST(ParameterBlock, SetLocalParameterizationWithSameExistingParameterization) {
+  double x[3] = {1.0, 2.0, 3.0};
+  ParameterBlock parameter_block(x, 3, -1);
+  std::vector<int> indices;
+  indices.push_back(1);
+  SubsetParameterization subset(3, indices);
+  parameter_block.SetParameterization(&subset);
+  parameter_block.SetParameterization(&subset);
+}
+
+TEST(ParameterBlock, SetLocalParameterizationDiesWhenResettingToNull) {
+  double x[3] = {1.0, 2.0, 3.0};
+  ParameterBlock parameter_block(x, 3, -1);
+  std::vector<int> indices;
+  indices.push_back(1);
+  SubsetParameterization subset(3, indices);
+  parameter_block.SetParameterization(&subset);
+  EXPECT_DEATH_IF_SUPPORTED(parameter_block.SetParameterization(nullptr), "nullptr");
+}
+
+TEST(ParameterBlock,
+     SetLocalParameterizationDiesWhenResettingToDifferentParameterization) {
+  double x[3] = {1.0, 2.0, 3.0};
+  ParameterBlock parameter_block(x, 3, -1);
+  std::vector<int> indices;
+  indices.push_back(1);
+  SubsetParameterization subset(3, indices);
+  parameter_block.SetParameterization(&subset);
+  SubsetParameterization subset_different(3, indices);
+  EXPECT_DEATH_IF_SUPPORTED(
+      parameter_block.SetParameterization(&subset_different), "re-set");
+}
+
+TEST(ParameterBlock, SetLocalParameterizationDiesOnNullParameterization) {
+  double x[3] = {1.0, 2.0, 3.0};
+  ParameterBlock parameter_block(x, 3, -1);
+  std::vector<int> indices;
+  indices.push_back(1);
+  EXPECT_DEATH_IF_SUPPORTED(parameter_block.SetParameterization(nullptr), "nullptr");
+}
+
+TEST(ParameterBlock, SetParameterizationDiesOnZeroLocalSize) {
+  double x[3] = {1.0, 2.0, 3.0};
+  ParameterBlock parameter_block(x, 3, -1);
+  std::vector<int> indices;
+  indices.push_back(0);
+  indices.push_back(1);
+  indices.push_back(2);
+  SubsetParameterization subset(3, indices);
+  EXPECT_DEATH_IF_SUPPORTED(parameter_block.SetParameterization(&subset),
+                            "positive dimensional tangent");
+}
+
+TEST(ParameterBlock, SetLocalParameterizationAndNormalOperation) {
+  double x[3] = { 1.0, 2.0, 3.0 };
+  ParameterBlock parameter_block(x, 3, -1);
+  std::vector<int> indices;
+  indices.push_back(1);
+  SubsetParameterization subset(3, indices);
+  parameter_block.SetParameterization(&subset);
+
+  // Ensure the local parameterization jacobian result is correctly computed.
+  ConstMatrixRef local_parameterization_jacobian(
+      parameter_block.LocalParameterizationJacobian(),
+      3,
+      2);
+  ASSERT_EQ(1.0, local_parameterization_jacobian(0, 0));
+  ASSERT_EQ(0.0, local_parameterization_jacobian(0, 1));
+  ASSERT_EQ(0.0, local_parameterization_jacobian(1, 0));
+  ASSERT_EQ(0.0, local_parameterization_jacobian(1, 1));
+  ASSERT_EQ(0.0, local_parameterization_jacobian(2, 0));
+  ASSERT_EQ(1.0, local_parameterization_jacobian(2, 1));
+
+  // Check that updating works as expected.
+  double x_plus_delta[3];
+  double delta[2] = { 0.5, 0.3 };
+  parameter_block.Plus(x, delta, x_plus_delta);
+  ASSERT_EQ(1.5, x_plus_delta[0]);
+  ASSERT_EQ(2.0, x_plus_delta[1]);
+  ASSERT_EQ(3.3, x_plus_delta[2]);
+}
+
+struct TestParameterization : public LocalParameterization {
+ public:
+  virtual ~TestParameterization() {}
+  virtual bool Plus(const double* x,
+                    const double* delta,
+                    double* x_plus_delta) const {
+    LOG(FATAL) << "Shouldn't get called.";
+    return true;
+  }
+  virtual bool ComputeJacobian(const double* x,
+                               double* jacobian) const {
+    jacobian[0] = *x * 2;
+    return true;
+  }
+
+  virtual int GlobalSize() const { return 1; }
+  virtual int LocalSize() const { return 1; }
+};
+
+TEST(ParameterBlock, SetStateUpdatesLocalParameterizationJacobian) {
+  TestParameterization test_parameterization;
+  double x[1] = { 1.0 };
+  ParameterBlock parameter_block(x, 1, -1, &test_parameterization);
+
+  EXPECT_EQ(2.0, *parameter_block.LocalParameterizationJacobian());
+
+  x[0] = 5.5;
+  parameter_block.SetState(x);
+  EXPECT_EQ(11.0, *parameter_block.LocalParameterizationJacobian());
+}
+
+TEST(ParameterBlock, PlusWithNoLocalParameterization) {
+  double x[2] = { 1.0, 2.0 };
+  ParameterBlock parameter_block(x, 2, -1);
+
+  double delta[2] = { 0.2, 0.3 };
+  double x_plus_delta[2];
+  parameter_block.Plus(x, delta, x_plus_delta);
+  EXPECT_EQ(1.2, x_plus_delta[0]);
+  EXPECT_EQ(2.3, x_plus_delta[1]);
+}
+
+// Stops computing the jacobian after the first time.
+class BadLocalParameterization : public LocalParameterization {
+ public:
+  BadLocalParameterization()
+      : calls_(0) {
+  }
+
+  virtual ~BadLocalParameterization() {}
+  virtual bool Plus(const double* x,
+                    const double* delta,
+                    double* x_plus_delta) const {
+    *x_plus_delta = *x + *delta;
+    return true;
+  }
+
+  virtual bool ComputeJacobian(const double* x, double* jacobian) const {
+    if (calls_ == 0) {
+      jacobian[0] = 0;
+    }
+    ++calls_;
+    return true;
+  }
+
+  virtual int GlobalSize() const { return 1; }
+  virtual int LocalSize()  const { return 1; }
+
+ private:
+  mutable int calls_;
+};
+
+TEST(ParameterBlock, DetectBadLocalParameterization) {
+  double x = 1;
+  BadLocalParameterization bad_parameterization;
+  ParameterBlock parameter_block(&x, 1, -1, &bad_parameterization);
+  double y = 2;
+  EXPECT_FALSE(parameter_block.SetState(&y));
+}
+
+TEST(ParameterBlock, DefaultBounds) {
+  double x[2];
+  ParameterBlock parameter_block(x, 2, -1, nullptr);
+  EXPECT_EQ(parameter_block.UpperBoundForParameter(0),
+            std::numeric_limits<double>::max());
+  EXPECT_EQ(parameter_block.UpperBoundForParameter(1),
+            std::numeric_limits<double>::max());
+  EXPECT_EQ(parameter_block.LowerBoundForParameter(0),
+            -std::numeric_limits<double>::max());
+  EXPECT_EQ(parameter_block.LowerBoundForParameter(1),
+            -std::numeric_limits<double>::max());
+}
+
+TEST(ParameterBlock, SetBounds) {
+  double x[2];
+  ParameterBlock parameter_block(x, 2, -1, nullptr);
+  parameter_block.SetLowerBound(0, 1);
+  parameter_block.SetUpperBound(1, 1);
+
+  EXPECT_EQ(parameter_block.LowerBoundForParameter(0), 1.0);
+  EXPECT_EQ(parameter_block.LowerBoundForParameter(1),
+            -std::numeric_limits<double>::max());
+
+  EXPECT_EQ(parameter_block.UpperBoundForParameter(0),
+            std::numeric_limits<double>::max());
+  EXPECT_EQ(parameter_block.UpperBoundForParameter(1), 1.0);
+}
+
+TEST(ParameterBlock, PlusWithBoundsConstraints) {
+  double x[] = {1.0, 0.0};
+  double delta[] = {2.0, -10.0};
+  ParameterBlock parameter_block(x, 2, -1, nullptr);
+  parameter_block.SetUpperBound(0, 2.0);
+  parameter_block.SetLowerBound(1, -1.0);
+  double x_plus_delta[2];
+  parameter_block.Plus(x, delta, x_plus_delta);
+  EXPECT_EQ(x_plus_delta[0], 2.0);
+  EXPECT_EQ(x_plus_delta[1], -1.0);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/parameter_dims_test.cc b/internal/ceres/parameter_dims_test.cc
new file mode 100644
index 0000000..f33536f
--- /dev/null
+++ b/internal/ceres/parameter_dims_test.cc
@@ -0,0 +1,104 @@
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: jodebo_beck@gmx.de (Johannes Beck)
+
+#include "ceres/internal/parameter_dims.h"
+
+#include <gtest/gtest.h>
+#include <type_traits>
+
+namespace ceres {
+namespace internal {
+
+// Is valid parameter dims unit test
+static_assert(IsValidParameterDimensionSequence(integer_sequence<int>()) ==
+                  true,
+              "Unit test of is valid parameter dimension sequence failed.");
+static_assert(
+    IsValidParameterDimensionSequence(integer_sequence<int, 2, 1>()) == true,
+    "Unit test of is valid parameter dimension sequence failed.");
+static_assert(
+    IsValidParameterDimensionSequence(integer_sequence<int, 0, 1>()) == false,
+    "Unit test of is valid parameter dimension sequence failed.");
+static_assert(
+    IsValidParameterDimensionSequence(integer_sequence<int, 3, 0>()) == false,
+    "Unit test of is valid parameter dimension sequence failed.");
+
+// Static parameter dims unit test
+static_assert(
+    std::is_same<StaticParameterDims<4, 2, 1>::Parameters,
+                 integer_sequence<int, 4, 2, 1>>::value == true,
+    "Unit test of type 'parameters' for static parameter dims failed.");
+
+static_assert(StaticParameterDims<4, 2, 1>::kIsValid == true,
+              "Unit test of is valid for static parameter dims failed.");
+static_assert(StaticParameterDims<4, 2, 1>::kIsDynamic == false,
+              "Unit test of is dynamic for static parameter dims failed.");
+static_assert(StaticParameterDims<4, 2, 1>::kNumParameterBlocks == 3,
+              "Unit test of number of parameter blocks for static parameter "
+              "dims failed.");
+static_assert(
+    StaticParameterDims<4, 2, 1>::kNumParameters == 7,
+    "Unit test of number of parameters for static parameter dims failed.");
+
+// Dynamic parameter dims unit test
+static_assert(DynamicParameterDims::kIsValid == true,
+              "Unit test of is valid for dynamic parameter dims failed.");
+static_assert(DynamicParameterDims::kIsDynamic == true,
+              "Unit test of is dynamic for dynamic parameter dims failed.");
+static_assert(DynamicParameterDims::kNumParameterBlocks == 0,
+              "Unit test of number of parameter blocks for dynamic parameter "
+              "dims failed.");
+static_assert(
+    DynamicParameterDims::kNumParameters == 0,
+    "Unit test of number of parameters for dynamic parameter dims failed.");
+
+TEST(ParameterDims, GetDims) {
+  constexpr int N0 = 3;
+  constexpr int N1 = 4;
+  constexpr int N2 = 2;
+
+  StaticParameterDims<N0, N1, N2> params;
+  EXPECT_EQ(N0, params.GetDim(0));
+  EXPECT_EQ(N1, params.GetDim(1));
+  EXPECT_EQ(N2, params.GetDim(2));
+}
+
+TEST(ParameterDims, GetUnpackedParameters) {
+  constexpr int N0 = 3;
+  constexpr int N1 = 4;
+  constexpr int N2 = 2;
+
+  using ParameterDims = StaticParameterDims<N0, N1, N2>;
+
+  std::array<double, ParameterDims::kNumParameters> packed_parameters{};
+  std::array<double*, 3> unpacked_parameters =
+      ParameterDims::GetUnpackedParameters(packed_parameters.data());
+
+  EXPECT_EQ(packed_parameters.data(), unpacked_parameters[0]);
+  EXPECT_EQ(packed_parameters.data() + N0, unpacked_parameters[1]);
+  EXPECT_EQ(packed_parameters.data() + N0 + N1, unpacked_parameters[2]);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/partitioned_matrix_view.cc b/internal/ceres/partitioned_matrix_view.cc
new file mode 100644
index 0000000..910f241
--- /dev/null
+++ b/internal/ceres/partitioned_matrix_view.cc
@@ -0,0 +1,158 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+#include "ceres/linear_solver.h"
+#include "ceres/partitioned_matrix_view.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+PartitionedMatrixViewBase*
+PartitionedMatrixViewBase::Create(const LinearSolver::Options& options,
+                                  const BlockSparseMatrix& matrix) {
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 2) &&
+     (options.f_block_size == 2)) {
+   return new PartitionedMatrixView<2, 2, 2>(matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 2) &&
+     (options.f_block_size == 3)) {
+   return new PartitionedMatrixView<2, 2, 3>(matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 2) &&
+     (options.f_block_size == 4)) {
+   return new PartitionedMatrixView<2, 2, 4>(matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 2)) {
+   return new PartitionedMatrixView<2, 2, Eigen::Dynamic>(matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 3) &&
+     (options.f_block_size == 3)) {
+   return new PartitionedMatrixView<2, 3, 3>(matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 3) &&
+     (options.f_block_size == 4)) {
+   return new PartitionedMatrixView<2, 3, 4>(matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 3) &&
+     (options.f_block_size == 6)) {
+   return new PartitionedMatrixView<2, 3, 6>(matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 3) &&
+     (options.f_block_size == 9)) {
+   return new PartitionedMatrixView<2, 3, 9>(matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 3)) {
+   return new PartitionedMatrixView<2, 3, Eigen::Dynamic>(matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 4) &&
+     (options.f_block_size == 3)) {
+   return new PartitionedMatrixView<2, 4, 3>(matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 4) &&
+     (options.f_block_size == 4)) {
+   return new PartitionedMatrixView<2, 4, 4>(matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 4) &&
+     (options.f_block_size == 6)) {
+   return new PartitionedMatrixView<2, 4, 6>(matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 4) &&
+     (options.f_block_size == 8)) {
+   return new PartitionedMatrixView<2, 4, 8>(matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 4) &&
+     (options.f_block_size == 9)) {
+   return new PartitionedMatrixView<2, 4, 9>(matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 4)) {
+   return new PartitionedMatrixView<2, 4, Eigen::Dynamic>(matrix, options.elimination_groups[0]);
+ }
+ if (options.row_block_size == 2){
+   return new PartitionedMatrixView<2, Eigen::Dynamic, Eigen::Dynamic>(matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 4) &&
+     (options.e_block_size == 4) &&
+     (options.f_block_size == 2)) {
+   return new PartitionedMatrixView<4, 4, 2>(matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 4) &&
+     (options.e_block_size == 4) &&
+     (options.f_block_size == 3)) {
+   return new PartitionedMatrixView<4, 4, 3>(matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 4) &&
+     (options.e_block_size == 4) &&
+     (options.f_block_size == 4)) {
+   return new PartitionedMatrixView<4, 4, 4>(matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 4) &&
+     (options.e_block_size == 4)) {
+   return new PartitionedMatrixView<4, 4, Eigen::Dynamic>(matrix, options.elimination_groups[0]);
+ }
+
+#endif
+  VLOG(1) << "Template specializations not found for <"
+          << options.row_block_size << ","
+          << options.e_block_size << ","
+          << options.f_block_size << ">";
+  return new PartitionedMatrixView<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic>(
+               matrix, options.elimination_groups[0]);
+};
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/partitioned_matrix_view.h b/internal/ceres/partitioned_matrix_view.h
new file mode 100644
index 0000000..6e75060
--- /dev/null
+++ b/internal/ceres/partitioned_matrix_view.h
@@ -0,0 +1,152 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// For generalized bi-partite Jacobian matrices that arise in
+// Structure from Motion related problems, it is sometimes useful to
+// have access to the two parts of the matrix as linear operators
+// themselves. This class provides that functionality.
+
+#ifndef CERES_INTERNAL_PARTITIONED_MATRIX_VIEW_H_
+#define CERES_INTERNAL_PARTITIONED_MATRIX_VIEW_H_
+
+#include <algorithm>
+#include <cstring>
+#include <vector>
+
+#include "ceres/block_structure.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/linear_solver.h"
+#include "ceres/small_blas.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+// Given generalized bi-partite matrix A = [E F], with the same block
+// structure as required by the Schur complement based solver, found
+// in explicit_schur_complement_solver.h, provide access to the
+// matrices E and F and their outer products E'E and F'F with
+// themselves.
+//
+// The absence of a BlockStructure object will result in a crash, and
+// if the block structure of the matrix does not satisfy the
+// requirements of the Schur complement solver, the output will be
+// unpredictable and wrong.
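+//
+// A minimal usage sketch (illustrative only; 'options' and 'A' are
+// hypothetical placeholders, and A must have the block structure
+// required by the Schur complement solver):
+//
+//   std::unique_ptr<PartitionedMatrixViewBase> view(
+//       PartitionedMatrixViewBase::Create(options, A));
+//   Vector y = Vector::Zero(view->num_rows());
+//   Vector x = Vector::Zero(view->num_cols_e());
+//   view->RightMultiplyE(x.data(), y.data());  // y += E x
+//
+// Here Vector is the dense vector typedef from ceres/internal/eigen.h.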
+class PartitionedMatrixViewBase {
+ public:
+  virtual ~PartitionedMatrixViewBase() {}
+
+  // y += E'x
+  virtual void LeftMultiplyE(const double* x, double* y) const = 0;
+
+  // y += F'x
+  virtual void LeftMultiplyF(const double* x, double* y) const = 0;
+
+  // y += Ex
+  virtual void RightMultiplyE(const double* x, double* y) const = 0;
+
+  // y += Fx
+  virtual void RightMultiplyF(const double* x, double* y) const = 0;
+
+  // Create and return the block diagonal of the matrix E'E.
+  virtual BlockSparseMatrix* CreateBlockDiagonalEtE() const = 0;
+
+  // Create and return the block diagonal of the matrix F'F. Caller
+  // owns the result.
+  virtual BlockSparseMatrix* CreateBlockDiagonalFtF() const = 0;
+
+  // Compute the block diagonal of the matrix E'E and store it in
+  // block_diagonal. The matrix block_diagonal is expected to have a
+  // BlockStructure (preferably created using
+  // CreateBlockDiagonalEtE) which has the same structure as the
+  // block diagonal of E'E.
+  virtual void UpdateBlockDiagonalEtE(
+      BlockSparseMatrix* block_diagonal) const = 0;
+
+  // Compute the block diagonal of the matrix F'F and store it in
+  // block_diagonal. The matrix block_diagonal is expected to have a
+  // BlockStructure (preferably created using
+  // CreateBlockDiagonalFtF) which has the same structure as the
+  // block diagonal of F'F.
+  virtual void UpdateBlockDiagonalFtF(
+      BlockSparseMatrix* block_diagonal) const = 0;
+
+  virtual int num_col_blocks_e() const = 0;
+  virtual int num_col_blocks_f() const = 0;
+  virtual int num_cols_e()       const = 0;
+  virtual int num_cols_f()       const = 0;
+  virtual int num_rows()         const = 0;
+  virtual int num_cols()         const = 0;
+
+  static PartitionedMatrixViewBase* Create(const LinearSolver::Options& options,
+                                           const BlockSparseMatrix& matrix);
+};
+
+template <int kRowBlockSize = Eigen::Dynamic,
+          int kEBlockSize = Eigen::Dynamic,
+          int kFBlockSize = Eigen::Dynamic >
+class PartitionedMatrixView : public PartitionedMatrixViewBase {
+ public:
+  // matrix = [E F], where the matrix E contains the first
+  // num_col_blocks_e column blocks.
+  PartitionedMatrixView(const BlockSparseMatrix& matrix, int num_col_blocks_e);
+
+  virtual ~PartitionedMatrixView();
+  virtual void LeftMultiplyE(const double* x, double* y) const;
+  virtual void LeftMultiplyF(const double* x, double* y) const;
+  virtual void RightMultiplyE(const double* x, double* y) const;
+  virtual void RightMultiplyF(const double* x, double* y) const;
+  virtual BlockSparseMatrix* CreateBlockDiagonalEtE() const;
+  virtual BlockSparseMatrix* CreateBlockDiagonalFtF() const;
+  virtual void UpdateBlockDiagonalEtE(BlockSparseMatrix* block_diagonal) const;
+  virtual void UpdateBlockDiagonalFtF(BlockSparseMatrix* block_diagonal) const;
+  virtual int num_col_blocks_e() const { return num_col_blocks_e_;  }
+  virtual int num_col_blocks_f() const { return num_col_blocks_f_;  }
+  virtual int num_cols_e()       const { return num_cols_e_;        }
+  virtual int num_cols_f()       const { return num_cols_f_;        }
+  virtual int num_rows()         const { return matrix_.num_rows(); }
+  virtual int num_cols()         const { return matrix_.num_cols(); }
+
+ private:
+  BlockSparseMatrix* CreateBlockDiagonalMatrixLayout(int start_col_block,
+                                                     int end_col_block) const;
+
+  const BlockSparseMatrix& matrix_;
+  int num_row_blocks_e_;
+  int num_col_blocks_e_;
+  int num_col_blocks_f_;
+  int num_cols_e_;
+  int num_cols_f_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_PARTITIONED_MATRIX_VIEW_H_
diff --git a/internal/ceres/partitioned_matrix_view_impl.h b/internal/ceres/partitioned_matrix_view_impl.h
new file mode 100644
index 0000000..f3f548c
--- /dev/null
+++ b/internal/ceres/partitioned_matrix_view_impl.h
@@ -0,0 +1,380 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/partitioned_matrix_view.h"
+
+#include <algorithm>
+#include <cstring>
+#include <vector>
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/block_structure.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/small_blas.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+PartitionedMatrixView(
+    const BlockSparseMatrix& matrix,
+    int num_col_blocks_e)
+    : matrix_(matrix),
+      num_col_blocks_e_(num_col_blocks_e) {
+  const CompressedRowBlockStructure* bs = matrix_.block_structure();
+  CHECK(bs != nullptr);
+
+  num_col_blocks_f_ = bs->cols.size() - num_col_blocks_e_;
+
+  // Compute the number of row blocks in E. The number of row blocks
+  // in E may be less than the number of row blocks in the input
+  // matrix, as some of the row blocks at the bottom may not have any
+  // e_blocks. For a definition of what an e_block is, please see
+  // explicit_schur_complement_solver.h
+  num_row_blocks_e_ = 0;
+  for (int r = 0; r < bs->rows.size(); ++r) {
+    const std::vector<Cell>& cells = bs->rows[r].cells;
+    if (cells[0].block_id < num_col_blocks_e_) {
+      ++num_row_blocks_e_;
+    }
+  }
+
+  // Compute the number of columns in E and F.
+  num_cols_e_ = 0;
+  num_cols_f_ = 0;
+
+  for (int c = 0; c < bs->cols.size(); ++c) {
+    const Block& block = bs->cols[c];
+    if (c < num_col_blocks_e_) {
+      num_cols_e_ += block.size;
+    } else {
+      num_cols_f_ += block.size;
+    }
+  }
+
+  CHECK_EQ(num_cols_e_ + num_cols_f_, matrix_.num_cols());
+}
+
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+~PartitionedMatrixView() {
+}
+
+// The next four methods don't seem to be particularly cache
+// friendly. This is an artifact of how the BlockStructure of the
+// input matrix is constructed. These methods will benefit from
+// multithreading as well as improved data layout.
+
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+void
+PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+RightMultiplyE(const double* x, double* y) const {
+  const CompressedRowBlockStructure* bs = matrix_.block_structure();
+
+  // Iterate over the first num_row_blocks_e_ row blocks, and multiply
+  // by the first cell in each row block.
+  const double* values = matrix_.values();
+  for (int r = 0; r < num_row_blocks_e_; ++r) {
+    const Cell& cell = bs->rows[r].cells[0];
+    const int row_block_pos = bs->rows[r].block.position;
+    const int row_block_size = bs->rows[r].block.size;
+    const int col_block_id = cell.block_id;
+    const int col_block_pos = bs->cols[col_block_id].position;
+    const int col_block_size = bs->cols[col_block_id].size;
+    MatrixVectorMultiply<kRowBlockSize, kEBlockSize, 1>(
+        values + cell.position, row_block_size, col_block_size,
+        x + col_block_pos,
+        y + row_block_pos);
+  }
+}
+
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+void
+PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+RightMultiplyF(const double* x, double* y) const {
+  const CompressedRowBlockStructure* bs = matrix_.block_structure();
+
+  // Iterate over row blocks. If the row block is in E, then multiply
+  // by all the cells except the first one, which is of type E. If the
+  // row block is not in E (i.e. it is in the bottom
+  // num_row_blocks - num_row_blocks_e row blocks), then all the cells
+  // are of type F, so multiply by all of them.
+  const double* values = matrix_.values();
+  for (int r = 0; r < num_row_blocks_e_; ++r) {
+    const int row_block_pos = bs->rows[r].block.position;
+    const int row_block_size = bs->rows[r].block.size;
+    const std::vector<Cell>& cells = bs->rows[r].cells;
+    for (int c = 1; c < cells.size(); ++c) {
+      const int col_block_id = cells[c].block_id;
+      const int col_block_pos = bs->cols[col_block_id].position;
+      const int col_block_size = bs->cols[col_block_id].size;
+      MatrixVectorMultiply<kRowBlockSize, kFBlockSize, 1>(
+          values + cells[c].position, row_block_size, col_block_size,
+          x + col_block_pos - num_cols_e_,
+          y + row_block_pos);
+    }
+  }
+
+  for (int r = num_row_blocks_e_; r < bs->rows.size(); ++r) {
+    const int row_block_pos = bs->rows[r].block.position;
+    const int row_block_size = bs->rows[r].block.size;
+    const std::vector<Cell>& cells = bs->rows[r].cells;
+    for (int c = 0; c < cells.size(); ++c) {
+      const int col_block_id = cells[c].block_id;
+      const int col_block_pos = bs->cols[col_block_id].position;
+      const int col_block_size = bs->cols[col_block_id].size;
+      MatrixVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
+          values + cells[c].position, row_block_size, col_block_size,
+          x + col_block_pos - num_cols_e_,
+          y + row_block_pos);
+    }
+  }
+}
+
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+void
+PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+LeftMultiplyE(const double* x, double* y) const {
+  const CompressedRowBlockStructure* bs = matrix_.block_structure();
+
+  // Iterate over the first num_row_blocks_e_ row blocks, and multiply
+  // by the first cell in each row block.
+  const double* values = matrix_.values();
+  for (int r = 0; r < num_row_blocks_e_; ++r) {
+    const Cell& cell = bs->rows[r].cells[0];
+    const int row_block_pos = bs->rows[r].block.position;
+    const int row_block_size = bs->rows[r].block.size;
+    const int col_block_id = cell.block_id;
+    const int col_block_pos = bs->cols[col_block_id].position;
+    const int col_block_size = bs->cols[col_block_id].size;
+    MatrixTransposeVectorMultiply<kRowBlockSize, kEBlockSize, 1>(
+        values + cell.position, row_block_size, col_block_size,
+        x + row_block_pos,
+        y + col_block_pos);
+  }
+}
+
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+void
+PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+LeftMultiplyF(const double* x, double* y) const {
+  const CompressedRowBlockStructure* bs = matrix_.block_structure();
+
+  // Iterate over row blocks. If the row block is in E, then multiply
+  // by all the cells except the first one, which is of type E. If the
+  // row block is not in E (i.e. it is in the bottom
+  // num_row_blocks - num_row_blocks_e row blocks), then all the cells
+  // are of type F, so multiply by all of them.
+  const double* values = matrix_.values();
+  for (int r = 0; r < num_row_blocks_e_; ++r) {
+    const int row_block_pos = bs->rows[r].block.position;
+    const int row_block_size = bs->rows[r].block.size;
+    const std::vector<Cell>& cells = bs->rows[r].cells;
+    for (int c = 1; c < cells.size(); ++c) {
+      const int col_block_id = cells[c].block_id;
+      const int col_block_pos = bs->cols[col_block_id].position;
+      const int col_block_size = bs->cols[col_block_id].size;
+      MatrixTransposeVectorMultiply<kRowBlockSize, kFBlockSize, 1>(
+        values + cells[c].position, row_block_size, col_block_size,
+        x + row_block_pos,
+        y + col_block_pos - num_cols_e_);
+    }
+  }
+
+  for (int r = num_row_blocks_e_; r < bs->rows.size(); ++r) {
+    const int row_block_pos = bs->rows[r].block.position;
+    const int row_block_size = bs->rows[r].block.size;
+    const std::vector<Cell>& cells = bs->rows[r].cells;
+    for (int c = 0; c < cells.size(); ++c) {
+      const int col_block_id = cells[c].block_id;
+      const int col_block_pos = bs->cols[col_block_id].position;
+      const int col_block_size = bs->cols[col_block_id].size;
+      MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
+        values + cells[c].position, row_block_size, col_block_size,
+        x + row_block_pos,
+        y + col_block_pos - num_cols_e_);
+    }
+  }
+}
+
+// Given a range of column blocks of a matrix m, compute the block
+// structure of the block diagonal of the matrix m(:,
+// start_col_block:end_col_block)'m(:, start_col_block:end_col_block)
+// and return a BlockSparseMatrix with this block structure. The
+// caller owns the result.
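+//
+// Illustrative sketch of the resulting layout (hypothetical input):
+// for column blocks of sizes {2, 3}, the block diagonal has two
+// diagonal cells of sizes 2x2 and 3x3, stored at value positions 0
+// and 4 respectively, for a total of 2*2 + 3*3 = 13 values.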
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+BlockSparseMatrix*
+PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+CreateBlockDiagonalMatrixLayout(int start_col_block, int end_col_block) const {
+  const CompressedRowBlockStructure* bs = matrix_.block_structure();
+  CompressedRowBlockStructure* block_diagonal_structure =
+      new CompressedRowBlockStructure;
+
+  int block_position = 0;
+  int diagonal_cell_position = 0;
+
+  // Iterate over the column blocks, creating a new diagonal block for
+  // each column block.
+  for (int c = start_col_block; c < end_col_block; ++c) {
+    const Block& block = bs->cols[c];
+    block_diagonal_structure->cols.push_back(Block());
+    Block& diagonal_block = block_diagonal_structure->cols.back();
+    diagonal_block.size = block.size;
+    diagonal_block.position = block_position;
+
+    block_diagonal_structure->rows.push_back(CompressedRow());
+    CompressedRow& row = block_diagonal_structure->rows.back();
+    row.block = diagonal_block;
+
+    row.cells.push_back(Cell());
+    Cell& cell = row.cells.back();
+    cell.block_id = c - start_col_block;
+    cell.position = diagonal_cell_position;
+
+    block_position += block.size;
+    diagonal_cell_position += block.size * block.size;
+  }
+
+  // Build a BlockSparseMatrix with the just computed block
+  // structure.
+  return new BlockSparseMatrix(block_diagonal_structure);
+}
+
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+BlockSparseMatrix*
+PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+CreateBlockDiagonalEtE() const {
+  BlockSparseMatrix* block_diagonal =
+      CreateBlockDiagonalMatrixLayout(0, num_col_blocks_e_);
+  UpdateBlockDiagonalEtE(block_diagonal);
+  return block_diagonal;
+}
+
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+BlockSparseMatrix*
+PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+CreateBlockDiagonalFtF() const {
+  BlockSparseMatrix* block_diagonal =
+      CreateBlockDiagonalMatrixLayout(
+          num_col_blocks_e_, num_col_blocks_e_ + num_col_blocks_f_);
+  UpdateBlockDiagonalFtF(block_diagonal);
+  return block_diagonal;
+}
+
+// Similar to the code in RightMultiplyE, except that instead of a
+// matrix-vector multiply it is an outer product.
+//
+//    block_diagonal = block_diagonal(E'E)
+//
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+void
+PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+UpdateBlockDiagonalEtE(
+    BlockSparseMatrix* block_diagonal) const {
+  const CompressedRowBlockStructure* bs = matrix_.block_structure();
+  const CompressedRowBlockStructure* block_diagonal_structure =
+      block_diagonal->block_structure();
+
+  block_diagonal->SetZero();
+  const double* values = matrix_.values();
+  for (int r = 0; r < num_row_blocks_e_ ; ++r) {
+    const Cell& cell = bs->rows[r].cells[0];
+    const int row_block_size = bs->rows[r].block.size;
+    const int block_id = cell.block_id;
+    const int col_block_size = bs->cols[block_id].size;
+    const int cell_position =
+        block_diagonal_structure->rows[block_id].cells[0].position;
+
+    MatrixTransposeMatrixMultiply
+        <kRowBlockSize, kEBlockSize, kRowBlockSize, kEBlockSize, 1>(
+            values + cell.position, row_block_size, col_block_size,
+            values + cell.position, row_block_size, col_block_size,
+            block_diagonal->mutable_values() + cell_position,
+            0, 0, col_block_size, col_block_size);
+  }
+}
+
+// Similar to the code in RightMultiplyF, except that instead of a
+// matrix-vector multiply it is an outer product.
+//
+//   block_diagonal = block_diagonal(F'F)
+//
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+void
+PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+UpdateBlockDiagonalFtF(BlockSparseMatrix* block_diagonal) const {
+  const CompressedRowBlockStructure* bs = matrix_.block_structure();
+  const CompressedRowBlockStructure* block_diagonal_structure =
+      block_diagonal->block_structure();
+
+  block_diagonal->SetZero();
+  const double* values = matrix_.values();
+  for (int r = 0; r < num_row_blocks_e_; ++r) {
+    const int row_block_size = bs->rows[r].block.size;
+    const std::vector<Cell>& cells = bs->rows[r].cells;
+    for (int c = 1; c < cells.size(); ++c) {
+      const int col_block_id = cells[c].block_id;
+      const int col_block_size = bs->cols[col_block_id].size;
+      const int diagonal_block_id = col_block_id - num_col_blocks_e_;
+      const int cell_position =
+          block_diagonal_structure->rows[diagonal_block_id].cells[0].position;
+
+      MatrixTransposeMatrixMultiply
+          <kRowBlockSize, kFBlockSize, kRowBlockSize, kFBlockSize, 1>(
+              values + cells[c].position, row_block_size, col_block_size,
+              values + cells[c].position, row_block_size, col_block_size,
+              block_diagonal->mutable_values() + cell_position,
+              0, 0, col_block_size, col_block_size);
+    }
+  }
+
+  for (int r = num_row_blocks_e_; r < bs->rows.size(); ++r) {
+    const int row_block_size = bs->rows[r].block.size;
+    const std::vector<Cell>& cells = bs->rows[r].cells;
+    for (int c = 0; c < cells.size(); ++c) {
+      const int col_block_id = cells[c].block_id;
+      const int col_block_size = bs->cols[col_block_id].size;
+      const int diagonal_block_id = col_block_id - num_col_blocks_e_;
+      const int cell_position =
+          block_diagonal_structure->rows[diagonal_block_id].cells[0].position;
+
+      MatrixTransposeMatrixMultiply
+          <Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>(
+              values + cells[c].position, row_block_size, col_block_size,
+              values + cells[c].position, row_block_size, col_block_size,
+              block_diagonal->mutable_values() + cell_position,
+              0, 0, col_block_size, col_block_size);
+    }
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/partitioned_matrix_view_template.py b/internal/ceres/partitioned_matrix_view_template.py
new file mode 100644
index 0000000..7894523
--- /dev/null
+++ b/internal/ceres/partitioned_matrix_view_template.py
@@ -0,0 +1,152 @@
+# Ceres Solver - A fast non-linear least squares minimizer
+# Copyright 2015 Google Inc. All rights reserved.
+# http://ceres-solver.org/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+#   this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+# * Neither the name of Google Inc. nor the names of its contributors may be
+#   used to endorse or promote products derived from this software without
+#   specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# Author: sameeragarwal@google.com (Sameer Agarwal)
+#
+# Script for explicitly generating template specializations of the
+# PartitionedMatrixView class. Explicitly generating these
+# instantiations in separate .cc files breaks the compilation into
+# separate compilation units rather than one large .cc file.
+#
+# This script creates two sets of files.
+#
+# 1. partitioned_matrix_view_x_x_x.cc
+# where each x indicates a template parameter, and
+#
+# 2. partitioned_matrix_view.cc
+#
+# that contains a factory function for instantiating these classes
+# based on runtime parameters.
+#
+# The list of specialization tuples determines the set of
+# specializations that is generated.
+
+HEADER = """// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// ========================================
+//
+// This file is generated using generate_template_specializations.py.
+"""
+
+DYNAMIC_FILE = """
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<%s, %s, %s>;
+
+}  // namespace internal
+}  // namespace ceres
+"""
+
+SPECIALIZATION_FILE = """
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<%s, %s, %s>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
+"""
+
+FACTORY_FILE_HEADER = """
+#include "ceres/linear_solver.h"
+#include "ceres/partitioned_matrix_view.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+PartitionedMatrixViewBase*
+PartitionedMatrixViewBase::Create(const LinearSolver::Options& options,
+                                  const BlockSparseMatrix& matrix) {
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+"""
+FACTORY = """ return new PartitionedMatrixView<%s, %s, %s>(matrix, options.elimination_groups[0]);"""
+
+FACTORY_FOOTER = """
+#endif
+  VLOG(1) << "Template specializations not found for <"
+          << options.row_block_size << ","
+          << options.e_block_size << ","
+          << options.f_block_size << ">";
+  return new PartitionedMatrixView<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic>(
+               matrix, options.elimination_groups[0]);
+};
+
+}  // namespace internal
+}  // namespace ceres
+"""
diff --git a/internal/ceres/partitioned_matrix_view_test.cc b/internal/ceres/partitioned_matrix_view_test.cc
new file mode 100644
index 0000000..40b49ef
--- /dev/null
+++ b/internal/ceres/partitioned_matrix_view_test.cc
@@ -0,0 +1,174 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/partitioned_matrix_view.h"
+
+#include <memory>
+#include <vector>
+#include "ceres/block_structure.h"
+#include "ceres/casts.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/linear_least_squares_problems.h"
+#include "ceres/random.h"
+#include "ceres/sparse_matrix.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+const double kEpsilon = 1e-14;
+
+class PartitionedMatrixViewTest : public ::testing::Test {
+ protected:
+  virtual void SetUp() {
+    srand(5);
+    std::unique_ptr<LinearLeastSquaresProblem> problem(
+        CreateLinearLeastSquaresProblemFromId(2));
+    CHECK(problem != nullptr);
+    A_.reset(problem->A.release());
+
+    num_cols_ = A_->num_cols();
+    num_rows_ = A_->num_rows();
+    num_eliminate_blocks_ = problem->num_eliminate_blocks;
+    LinearSolver::Options options;
+    options.elimination_groups.push_back(num_eliminate_blocks_);
+    pmv_.reset(PartitionedMatrixViewBase::Create(
+                   options,
+                   *down_cast<BlockSparseMatrix*>(A_.get())));
+  }
+
+  int num_rows_;
+  int num_cols_;
+  int num_eliminate_blocks_;
+  std::unique_ptr<SparseMatrix> A_;
+  std::unique_ptr<PartitionedMatrixViewBase> pmv_;
+};
+
+TEST_F(PartitionedMatrixViewTest, DimensionsTest) {
+  EXPECT_EQ(pmv_->num_col_blocks_e(), num_eliminate_blocks_);
+  EXPECT_EQ(pmv_->num_col_blocks_f(), num_cols_ - num_eliminate_blocks_);
+  EXPECT_EQ(pmv_->num_cols_e(), num_eliminate_blocks_);
+  EXPECT_EQ(pmv_->num_cols_f(), num_cols_ - num_eliminate_blocks_);
+  EXPECT_EQ(pmv_->num_cols(), A_->num_cols());
+  EXPECT_EQ(pmv_->num_rows(), A_->num_rows());
+}
+
+TEST_F(PartitionedMatrixViewTest, RightMultiplyE) {
+  Vector x1(pmv_->num_cols_e());
+  Vector x2(pmv_->num_cols());
+  x2.setZero();
+
+  for (int i = 0; i < pmv_->num_cols_e(); ++i) {
+    x1(i) = x2(i) = RandDouble();
+  }
+
+  Vector y1 = Vector::Zero(pmv_->num_rows());
+  pmv_->RightMultiplyE(x1.data(), y1.data());
+
+  Vector y2 = Vector::Zero(pmv_->num_rows());
+  A_->RightMultiply(x2.data(), y2.data());
+
+  for (int i = 0; i < pmv_->num_rows(); ++i) {
+    EXPECT_NEAR(y1(i), y2(i), kEpsilon);
+  }
+}
+
+TEST_F(PartitionedMatrixViewTest, RightMultiplyF) {
+  Vector x1(pmv_->num_cols_f());
+  Vector x2 = Vector::Zero(pmv_->num_cols());
+
+  for (int i = 0; i < pmv_->num_cols_f(); ++i) {
+    x1(i) = RandDouble();
+    x2(i + pmv_->num_cols_e()) = x1(i);
+  }
+
+  Vector y1 = Vector::Zero(pmv_->num_rows());
+  pmv_->RightMultiplyF(x1.data(), y1.data());
+
+  Vector y2 = Vector::Zero(pmv_->num_rows());
+  A_->RightMultiply(x2.data(), y2.data());
+
+  for (int i = 0; i < pmv_->num_rows(); ++i) {
+    EXPECT_NEAR(y1(i), y2(i), kEpsilon);
+  }
+}
+
+TEST_F(PartitionedMatrixViewTest, LeftMultiply) {
+  Vector x = Vector::Zero(pmv_->num_rows());
+  for (int i = 0; i < pmv_->num_rows(); ++i) {
+    x(i) = RandDouble();
+  }
+
+  Vector y = Vector::Zero(pmv_->num_cols());
+  Vector y1 = Vector::Zero(pmv_->num_cols_e());
+  Vector y2 = Vector::Zero(pmv_->num_cols_f());
+
+  A_->LeftMultiply(x.data(), y.data());
+  pmv_->LeftMultiplyE(x.data(), y1.data());
+  pmv_->LeftMultiplyF(x.data(), y2.data());
+
+  for (int i = 0; i < pmv_->num_cols(); ++i) {
+    EXPECT_NEAR(y(i),
+                (i < pmv_->num_cols_e()) ? y1(i) : y2(i - pmv_->num_cols_e()),
+                kEpsilon);
+  }
+}
+
+TEST_F(PartitionedMatrixViewTest, BlockDiagonalEtE) {
+  std::unique_ptr<BlockSparseMatrix>
+      block_diagonal_ee(pmv_->CreateBlockDiagonalEtE());
+  const CompressedRowBlockStructure* bs  = block_diagonal_ee->block_structure();
+
+  EXPECT_EQ(block_diagonal_ee->num_rows(), 2);
+  EXPECT_EQ(block_diagonal_ee->num_cols(), 2);
+  EXPECT_EQ(bs->cols.size(), 2);
+  EXPECT_EQ(bs->rows.size(), 2);
+
+  EXPECT_NEAR(block_diagonal_ee->values()[0], 10.0, kEpsilon);
+  EXPECT_NEAR(block_diagonal_ee->values()[1], 155.0, kEpsilon);
+}
+
+TEST_F(PartitionedMatrixViewTest, BlockDiagonalFtF) {
+  std::unique_ptr<BlockSparseMatrix>
+      block_diagonal_ff(pmv_->CreateBlockDiagonalFtF());
+  const CompressedRowBlockStructure* bs  = block_diagonal_ff->block_structure();
+
+  EXPECT_EQ(block_diagonal_ff->num_rows(), 3);
+  EXPECT_EQ(block_diagonal_ff->num_cols(), 3);
+  EXPECT_EQ(bs->cols.size(), 3);
+  EXPECT_EQ(bs->rows.size(), 3);
+  EXPECT_NEAR(block_diagonal_ff->values()[0], 70.0, kEpsilon);
+  EXPECT_NEAR(block_diagonal_ff->values()[1], 17.0, kEpsilon);
+  EXPECT_NEAR(block_diagonal_ff->values()[2], 37.0, kEpsilon);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/polynomial.cc b/internal/ceres/polynomial.cc
new file mode 100644
index 0000000..20812f4
--- /dev/null
+++ b/internal/ceres/polynomial.cc
@@ -0,0 +1,394 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: moll.markus@arcor.de (Markus Moll)
+//         sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/polynomial.h"
+
+#include <cmath>
+#include <cstddef>
+#include <vector>
+
+#include "Eigen/Dense"
+#include "ceres/function_sample.h"
+#include "ceres/internal/port.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+using std::vector;
+
+namespace {
+
+// Balancing function as described by B. N. Parlett and C. Reinsch,
+// "Balancing a Matrix for Calculation of Eigenvalues and Eigenvectors".
+// In: Numerische Mathematik, Volume 13, Number 4 (1969), 293-304,
+// Springer Berlin / Heidelberg. DOI: 10.1007/BF02165404
+void BalanceCompanionMatrix(Matrix* companion_matrix_ptr) {
+  CHECK(companion_matrix_ptr != nullptr);
+  Matrix& companion_matrix = *companion_matrix_ptr;
+  Matrix companion_matrix_offdiagonal = companion_matrix;
+  companion_matrix_offdiagonal.diagonal().setZero();
+
+  const int degree = companion_matrix.rows();
+
+  // gamma <= 1 controls how much a change in the scaling has to
+  // lower the 1-norm of the companion matrix to be accepted.
+  //
+  // gamma = 1 seems to lead to cycles (numerical issues?), so
+  // we set it slightly lower.
+  const double gamma = 0.9;
+
+  // Greedily scale row/column pairs until there is no change.
+  bool scaling_has_changed;
+  do {
+    scaling_has_changed = false;
+
+    for (int i = 0; i < degree; ++i) {
+      const double row_norm = companion_matrix_offdiagonal.row(i).lpNorm<1>();
+      const double col_norm = companion_matrix_offdiagonal.col(i).lpNorm<1>();
+
+      // Decompose row_norm/col_norm into mantissa * 2^exponent,
+      // where 0.5 <= mantissa < 1. Discard mantissa (return value
+      // of frexp), as only the exponent is needed.
+      int exponent = 0;
+      std::frexp(row_norm / col_norm, &exponent);
+      exponent /= 2;
+
+      if (exponent != 0) {
+        const double scaled_col_norm = std::ldexp(col_norm, exponent);
+        const double scaled_row_norm = std::ldexp(row_norm, -exponent);
+        if (scaled_col_norm + scaled_row_norm < gamma * (col_norm + row_norm)) {
+          // Accept the new scaling. Multiplication by powers of 2 should
+          // not introduce rounding errors (ignoring non-normalized numbers
+          // and over- or underflow).
+          scaling_has_changed = true;
+          companion_matrix_offdiagonal.row(i) *= std::ldexp(1.0, -exponent);
+          companion_matrix_offdiagonal.col(i) *= std::ldexp(1.0, exponent);
+        }
+      }
+    }
+  } while (scaling_has_changed);
+
+  companion_matrix_offdiagonal.diagonal() = companion_matrix.diagonal();
+  companion_matrix = companion_matrix_offdiagonal;
+  VLOG(3) << "Balanced companion matrix is\n" << companion_matrix;
+}
+
+void BuildCompanionMatrix(const Vector& polynomial,
+                          Matrix* companion_matrix_ptr) {
+  CHECK(companion_matrix_ptr != nullptr);
+  Matrix& companion_matrix = *companion_matrix_ptr;
+
+  const int degree = polynomial.size() - 1;
+
+  companion_matrix.resize(degree, degree);
+  companion_matrix.setZero();
+  companion_matrix.diagonal(-1).setOnes();
+  companion_matrix.col(degree - 1) = -polynomial.reverse().head(degree);
+}
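+
+// Illustrative example: for a monic cubic x^3 + a x^2 + b x + c, i.e.
+// polynomial = [1, a, b, c], the matrix built above is
+//
+//   [ 0  0  -c ]
+//   [ 1  0  -b ]
+//   [ 0  1  -a ]
+//
+// whose eigenvalues are exactly the roots of the polynomial.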
+
+// Remove leading terms with zero coefficients.
+Vector RemoveLeadingZeros(const Vector& polynomial_in) {
+  int i = 0;
+  while (i < (polynomial_in.size() - 1) && polynomial_in(i) == 0.0) {
+    ++i;
+  }
+  return polynomial_in.tail(polynomial_in.size() - i);
+}
+
+void FindLinearPolynomialRoots(const Vector& polynomial,
+                               Vector* real,
+                               Vector* imaginary) {
+  CHECK_EQ(polynomial.size(), 2);
+  if (real != NULL) {
+    real->resize(1);
+    (*real)(0) = -polynomial(1) / polynomial(0);
+  }
+
+  if (imaginary != NULL) {
+    imaginary->setZero(1);
+  }
+}
+
+void FindQuadraticPolynomialRoots(const Vector& polynomial,
+                                  Vector* real,
+                                  Vector* imaginary) {
+  CHECK_EQ(polynomial.size(), 3);
+  const double a = polynomial(0);
+  const double b = polynomial(1);
+  const double c = polynomial(2);
+  const double D = b * b - 4 * a * c;
+  const double sqrt_D = sqrt(fabs(D));
+  if (real != NULL) {
+    real->setZero(2);
+  }
+  if (imaginary != NULL) {
+    imaginary->setZero(2);
+  }
+
+  // Real roots.
+  if (D >= 0) {
+    if (real != NULL) {
+      // Stable quadratic roots according to BKP Horn.
+      // http://people.csail.mit.edu/bkph/articles/Quadratics.pdf
+      if (b >= 0) {
+        (*real)(0) = (-b - sqrt_D) / (2.0 * a);
+        (*real)(1) = (2.0 * c) / (-b - sqrt_D);
+      } else {
+        (*real)(0) = (2.0 * c) / (-b + sqrt_D);
+        (*real)(1) = (-b + sqrt_D) / (2.0 * a);
+      }
+    }
+    return;
+  }
+
+  // Use the normal quadratic formula for the complex case.
+  if (real != NULL) {
+    (*real)(0) = -b / (2.0 * a);
+    (*real)(1) = -b / (2.0 * a);
+  }
+  if (imaginary != NULL) {
+    (*imaginary)(0) = sqrt_D / (2.0 * a);
+    (*imaginary)(1) = -sqrt_D / (2.0 * a);
+  }
+}
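+
+// Illustrative example of the branches above: for x^2 - 3x + 2, i.e.
+// polynomial = [1, -3, 2], D = 9 - 8 = 1 and b < 0, so
+//
+//   real(0) = 2c / (-b + sqrt(D)) = 4 / 4 = 1
+//   real(1) = (-b + sqrt(D)) / (2a) = 4 / 2 = 2.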
+}  // namespace
+
+bool FindPolynomialRoots(const Vector& polynomial_in,
+                         Vector* real,
+                         Vector* imaginary) {
+  if (polynomial_in.size() == 0) {
+    LOG(ERROR) << "Invalid polynomial of size 0 passed to FindPolynomialRoots";
+    return false;
+  }
+
+  Vector polynomial = RemoveLeadingZeros(polynomial_in);
+  const int degree = polynomial.size() - 1;
+
+  VLOG(3) << "Input polynomial: " << polynomial_in.transpose();
+  if (polynomial.size() != polynomial_in.size()) {
+    VLOG(3) << "Trimmed polynomial: " << polynomial.transpose();
+  }
+
+  // Is the polynomial constant?
+  if (degree == 0) {
+    LOG(WARNING) << "Trying to extract roots from a constant "
+                 << "polynomial in FindPolynomialRoots";
+    // We return true with no roots rather than false: a constant
+    // polynomial genuinely has no roots, so this is not a failure to
+    // extract roots that exist.
+    return true;
+  }
+
+  // Linear
+  if (degree == 1) {
+    FindLinearPolynomialRoots(polynomial, real, imaginary);
+    return true;
+  }
+
+  // Quadratic
+  if (degree == 2) {
+    FindQuadraticPolynomialRoots(polynomial, real, imaginary);
+    return true;
+  }
+
+  // The degree is now known to be at least 3. For cubic or higher
+  // roots we use the method of companion matrices.
+
+  // Divide by leading term
+  const double leading_term = polynomial(0);
+  polynomial /= leading_term;
+
+  // Build and balance the companion matrix to the polynomial.
+  Matrix companion_matrix(degree, degree);
+  BuildCompanionMatrix(polynomial, &companion_matrix);
+  BalanceCompanionMatrix(&companion_matrix);
+
+  // Find its (complex) eigenvalues.
+  Eigen::EigenSolver<Matrix> solver(companion_matrix, false);
+  if (solver.info() != Eigen::Success) {
+    LOG(ERROR) << "Failed to extract eigenvalues from companion matrix.";
+    return false;
+  }
+
+  // Output roots
+  if (real != NULL) {
+    *real = solver.eigenvalues().real();
+  } else {
+    LOG(WARNING) << "NULL pointer passed as real argument to "
+                 << "FindPolynomialRoots. Real parts of the roots will not "
+                 << "be returned.";
+  }
+  if (imaginary != NULL) {
+    *imaginary = solver.eigenvalues().imag();
+  }
+  return true;
+}
+
+Vector DifferentiatePolynomial(const Vector& polynomial) {
+  const int degree = polynomial.rows() - 1;
+  CHECK_GE(degree, 0);
+
+  // Degree zero polynomials are constants, and their derivative does
+  // not result in a smaller degree polynomial, just a degree zero
+  // polynomial with value zero.
+  if (degree == 0) {
+    return Eigen::VectorXd::Zero(1);
+  }
+
+  Vector derivative(degree);
+  for (int i = 0; i < degree; ++i) {
+    derivative(i) = (degree - i) * polynomial(i);
+  }
+
+  return derivative;
+}
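+
+// Illustrative example: differentiating x^3 + 2x^2 + 3x + 2, stored as
+// [1, 2, 3, 2], yields 3x^2 + 4x + 3, stored as [3, 4, 3].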
+
+void MinimizePolynomial(const Vector& polynomial,
+                        const double x_min,
+                        const double x_max,
+                        double* optimal_x,
+                        double* optimal_value) {
+  // Seed the minimum with the polynomial values at the interval end
+  // points and at its midpoint.
+  //
+  // Inspecting the midpoint is technically not needed, but we do it to
+  // keep this code as close to the minFunc package as possible.
+  *optimal_x = (x_min + x_max) / 2.0;
+  *optimal_value = EvaluatePolynomial(polynomial, *optimal_x);
+
+  const double x_min_value = EvaluatePolynomial(polynomial, x_min);
+  if (x_min_value < *optimal_value) {
+    *optimal_value = x_min_value;
+    *optimal_x = x_min;
+  }
+
+  const double x_max_value = EvaluatePolynomial(polynomial, x_max);
+  if (x_max_value < *optimal_value) {
+    *optimal_value = x_max_value;
+    *optimal_x = x_max;
+  }
+
+  // If the polynomial is linear or constant, we are done.
+  if (polynomial.rows() <= 2) {
+    return;
+  }
+
+  const Vector derivative = DifferentiatePolynomial(polynomial);
+  Vector roots_real;
+  if (!FindPolynomialRoots(derivative, &roots_real, NULL)) {
+    LOG(WARNING) << "Unable to find the critical points of "
+                 << "the interpolating polynomial.";
+    return;
+  }
+
+  // This is a bit of overkill, as some of the roots may actually have a
+  // complex part, but it is simpler to just check these values.
+  for (int i = 0; i < roots_real.rows(); ++i) {
+    const double root = roots_real(i);
+    if ((root < x_min) || (root > x_max)) {
+      continue;
+    }
+
+    const double value = EvaluatePolynomial(polynomial, root);
+    if (value < *optimal_value) {
+      *optimal_value = value;
+      *optimal_x = root;
+    }
+  }
+}
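+
+// A minimal usage sketch (values taken from the quadratic x^2 - 3x + 2,
+// which is also exercised by polynomial_test.cc):
+//
+//   Vector polynomial(3);
+//   polynomial << 1.0, -3.0, 2.0;
+//   double optimal_x, optimal_value;
+//   MinimizePolynomial(polynomial, -2.0, 2.0, &optimal_x, &optimal_value);
+//   // optimal_x == 1.5, optimal_value == -0.25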
+
+Vector FindInterpolatingPolynomial(const vector<FunctionSample>& samples) {
+  const int num_samples = samples.size();
+  int num_constraints = 0;
+  for (int i = 0; i < num_samples; ++i) {
+    if (samples[i].value_is_valid) {
+      ++num_constraints;
+    }
+    if (samples[i].gradient_is_valid) {
+      ++num_constraints;
+    }
+  }
+
+  const int degree = num_constraints - 1;
+
+  Matrix lhs = Matrix::Zero(num_constraints, num_constraints);
+  Vector rhs = Vector::Zero(num_constraints);
+
+  int row = 0;
+  for (int i = 0; i < num_samples; ++i) {
+    const FunctionSample& sample = samples[i];
+    if (sample.value_is_valid) {
+      for (int j = 0; j <= degree; ++j) {
+        lhs(row, j) = pow(sample.x, degree - j);
+      }
+      rhs(row) = sample.value;
+      ++row;
+    }
+
+    if (sample.gradient_is_valid) {
+      for (int j = 0; j < degree; ++j) {
+        lhs(row, j) = (degree - j) * pow(sample.x, degree - j - 1);
+      }
+      rhs(row) = sample.gradient;
+      ++row;
+    }
+  }
+
+  // TODO(sameeragarwal): This is a hack.
+  // https://github.com/ceres-solver/ceres-solver/issues/248
+  Eigen::FullPivLU<Matrix> lu(lhs);
+  return lu.setThreshold(0.0).solve(rhs);
+}
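+
+// Illustrative example of the linear system above: a single sample at
+// x = 1 with value 1 and gradient 2 gives num_constraints = 2 and
+// degree = 1, so
+//
+//   [ 1  1 ] [c0]   [ 1 ]     (value constraint)
+//   [ 1  0 ] [c1] = [ 2 ]     (gradient constraint)
+//
+// with solution c0 = 2, c1 = -1, i.e. the polynomial 2x - 1.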
+
+void MinimizeInterpolatingPolynomial(const vector<FunctionSample>& samples,
+                                     double x_min,
+                                     double x_max,
+                                     double* optimal_x,
+                                     double* optimal_value) {
+  const Vector polynomial = FindInterpolatingPolynomial(samples);
+  MinimizePolynomial(polynomial, x_min, x_max, optimal_x, optimal_value);
+  for (int i = 0; i < samples.size(); ++i) {
+    const FunctionSample& sample = samples[i];
+    if ((sample.x < x_min) || (sample.x > x_max)) {
+      continue;
+    }
+
+    const double value = EvaluatePolynomial(polynomial, sample.x);
+    if (value < *optimal_value) {
+      *optimal_x = sample.x;
+      *optimal_value = value;
+    }
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/polynomial.h b/internal/ceres/polynomial.h
new file mode 100644
index 0000000..3e09bae
--- /dev/null
+++ b/internal/ceres/polynomial.h
@@ -0,0 +1,116 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: moll.markus@arcor.de (Markus Moll)
+//         sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_POLYNOMIAL_SOLVER_H_
+#define CERES_INTERNAL_POLYNOMIAL_SOLVER_H_
+
+#include <vector>
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/port.h"
+
+namespace ceres {
+namespace internal {
+
+struct FunctionSample;
+
+// All polynomials are assumed to be of the form
+//
+//   sum_{i=0}^N polynomial(i) x^{N-i},
+//
+// and are given by a vector of coefficients of size N + 1.
+
+// Evaluate the polynomial at x using the Horner scheme.
+inline double EvaluatePolynomial(const Vector& polynomial, double x) {
+  double v = 0.0;
+  for (int i = 0; i < polynomial.size(); ++i) {
+    v = v * x + polynomial(i);
+  }
+  return v;
+}
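+
+// For example (illustrative): for p(x) = 2x^2 + 3x + 2, stored as the
+// coefficient vector [2, 3, 2], Horner evaluation at x = 1 proceeds as
+// v = 2, then v = 2*1 + 3 = 5, then v = 5*1 + 2 = 7:
+//
+//   Vector p(3);
+//   p << 2.0, 3.0, 2.0;
+//   const double value = EvaluatePolynomial(p, 1.0);  // value == 7.0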
+
+// Use the companion matrix eigenvalues to determine the roots of the
+// polynomial.
+//
+// This function returns true on success, false otherwise.
+// Failure indicates that the polynomial is invalid (of size 0) or
+// that the eigenvalues of the companion matrix could not be computed.
+// On failure, a more detailed message will be written to LOG(ERROR).
+// If real is not NULL, the real parts of the roots will be returned in it.
+// Likewise, if imaginary is not NULL, imaginary parts will be returned in it.
+bool FindPolynomialRoots(const Vector& polynomial,
+                         Vector* real,
+                         Vector* imaginary);
+
+// Return the derivative of the given polynomial. It is assumed that
+// the input polynomial is at least of degree zero.
+Vector DifferentiatePolynomial(const Vector& polynomial);
+
+// Find the minimum value of the polynomial in the interval [x_min,
+// x_max]. The minimum is obtained by computing all the roots of the
+// derivative of the input polynomial. All real roots within the
+// interval [x_min, x_max] are considered as well as the end points
+// x_min and x_max. Since polynomials are differentiable functions,
+// this ensures that the true minimum is found.
+void MinimizePolynomial(const Vector& polynomial,
+                        double x_min,
+                        double x_max,
+                        double* optimal_x,
+                        double* optimal_value);
+
+// Given a set of function value and/or gradient samples, find a
+// polynomial whose value and gradients are exactly equal to the ones
+// in samples.
+//
+// Generally speaking,
+//
+// degree = # values + # gradients - 1
+//
+// Of course it is possible to sample a polynomial any number of times;
+// in that case, generally speaking, the spurious higher order
+// coefficients will be zero. For example, two value samples and one
+// gradient sample determine a quadratic.
+Vector FindInterpolatingPolynomial(const std::vector<FunctionSample>& samples);
+
+// Interpolate the function described by samples with a polynomial,
+// and minimize it on the interval [x_min, x_max]. Depending on the
+// input samples, it is possible that the interpolation or the root
+// finding algorithms may fail due to numerical difficulties. But the
+// function is guaranteed to return its best guess of an answer, by
+// considering the samples and the end points as possible solutions.
+void MinimizeInterpolatingPolynomial(const std::vector<FunctionSample>& samples,
+                                     double x_min,
+                                     double x_max,
+                                     double* optimal_x,
+                                     double* optimal_value);
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_POLYNOMIAL_SOLVER_H_
diff --git a/internal/ceres/polynomial_test.cc b/internal/ceres/polynomial_test.cc
new file mode 100644
index 0000000..00c8534
--- /dev/null
+++ b/internal/ceres/polynomial_test.cc
@@ -0,0 +1,517 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: moll.markus@arcor.de (Markus Moll)
+//         sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/polynomial.h"
+
+#include <limits>
+#include <cmath>
+#include <cstddef>
+#include <algorithm>
+#include "gtest/gtest.h"
+#include "ceres/function_sample.h"
+#include "ceres/test_util.h"
+
+namespace ceres {
+namespace internal {
+
+using std::vector;
+
+namespace {
+
+// For IEEE-754 doubles, machine precision is about 2e-16.
+const double kEpsilon = 1e-13;
+const double kEpsilonLoose = 1e-9;
+
+// Return the constant polynomial p(x) = value.
+Vector ConstantPolynomial(double value) {
+  Vector poly(1);
+  poly(0) = value;
+  return poly;
+}
+
+// Return the polynomial p(x) = poly(x) * (x - root).
+Vector AddRealRoot(const Vector& poly, double root) {
+  Vector poly2(poly.size() + 1);
+  poly2.setZero();
+  poly2.head(poly.size()) += poly;
+  poly2.tail(poly.size()) -= root * poly;
+  return poly2;
+}
+
+// Return the polynomial
+// p(x) = poly(x) * (x - real - imag*i) * (x - real + imag*i).
+Vector AddComplexRootPair(const Vector& poly, double real, double imag) {
+  Vector poly2(poly.size() + 2);
+  poly2.setZero();
+  // Multiply poly by x^2 - 2*real*x + (real^2 + imag^2).
+  poly2.head(poly.size()) += poly;
+  poly2.segment(1, poly.size()) -= 2 * real * poly;
+  poly2.tail(poly.size()) += (real*real + imag*imag) * poly;
+  return poly2;
+}
+
+// Sort the entries in a vector.
+// Needed because the roots are not returned in sorted order.
+Vector SortVector(const Vector& in) {
+  Vector out(in);
+  std::sort(out.data(), out.data() + out.size());
+  return out;
+}
+
+// Run a test with the polynomial defined by the N real roots in roots_real.
+// If use_real is false, NULL is passed as the real argument to
+// FindPolynomialRoots. If use_imaginary is false, NULL is passed as the
+// imaginary argument to FindPolynomialRoots.
+template<int N>
+void RunPolynomialTestRealRoots(const double (&real_roots)[N],
+                                bool use_real,
+                                bool use_imaginary,
+                                double epsilon) {
+  Vector real;
+  Vector imaginary;
+  Vector poly = ConstantPolynomial(1.23);
+  for (int i = 0; i < N; ++i) {
+    poly = AddRealRoot(poly, real_roots[i]);
+  }
+  Vector* const real_ptr = use_real ? &real : NULL;
+  Vector* const imaginary_ptr = use_imaginary ? &imaginary : NULL;
+  bool success = FindPolynomialRoots(poly, real_ptr, imaginary_ptr);
+
+  EXPECT_EQ(success, true);
+  if (use_real) {
+    EXPECT_EQ(real.size(), N);
+    real = SortVector(real);
+    ExpectArraysClose(N, real.data(), real_roots, epsilon);
+  }
+  if (use_imaginary) {
+    EXPECT_EQ(imaginary.size(), N);
+    const Vector zeros = Vector::Zero(N);
+    ExpectArraysClose(N, imaginary.data(), zeros.data(), epsilon);
+  }
+}
+}  // namespace
+
+TEST(Polynomial, InvalidPolynomialOfZeroLengthIsRejected) {
+  // Vector poly(0) is an ambiguous constructor call, so
+  // use the constructor with explicit column count.
+  Vector poly(0, 1);
+  Vector real;
+  Vector imag;
+  bool success = FindPolynomialRoots(poly, &real, &imag);
+
+  EXPECT_EQ(success, false);
+}
+
+TEST(Polynomial, ConstantPolynomialReturnsNoRoots) {
+  Vector poly = ConstantPolynomial(1.23);
+  Vector real;
+  Vector imag;
+  bool success = FindPolynomialRoots(poly, &real, &imag);
+
+  EXPECT_EQ(success, true);
+  EXPECT_EQ(real.size(), 0);
+  EXPECT_EQ(imag.size(), 0);
+}
+
+TEST(Polynomial, LinearPolynomialWithPositiveRootWorks) {
+  const double roots[1] = { 42.42 };
+  RunPolynomialTestRealRoots(roots, true, true, kEpsilon);
+}
+
+TEST(Polynomial, LinearPolynomialWithNegativeRootWorks) {
+  const double roots[1] = { -42.42 };
+  RunPolynomialTestRealRoots(roots, true, true, kEpsilon);
+}
+
+TEST(Polynomial, QuadraticPolynomialWithPositiveRootsWorks) {
+  const double roots[2] = { 1.0, 42.42 };
+  RunPolynomialTestRealRoots(roots, true, true, kEpsilon);
+}
+
+TEST(Polynomial, QuadraticPolynomialWithOneNegativeRootWorks) {
+  const double roots[2] = { -42.42, 1.0 };
+  RunPolynomialTestRealRoots(roots, true, true, kEpsilon);
+}
+
+TEST(Polynomial, QuadraticPolynomialWithTwoNegativeRootsWorks) {
+  const double roots[2] = { -42.42, -1.0 };
+  RunPolynomialTestRealRoots(roots, true, true, kEpsilon);
+}
+
+TEST(Polynomial, QuadraticPolynomialWithCloseRootsWorks) {
+  const double roots[2] = { 42.42, 42.43 };
+  RunPolynomialTestRealRoots(roots, true, false, kEpsilonLoose);
+}
+
+TEST(Polynomial, QuadraticPolynomialWithComplexRootsWorks) {
+  Vector real;
+  Vector imag;
+
+  Vector poly = ConstantPolynomial(1.23);
+  poly = AddComplexRootPair(poly, 42.42, 4.2);
+  bool success = FindPolynomialRoots(poly, &real, &imag);
+
+  EXPECT_EQ(success, true);
+  EXPECT_EQ(real.size(), 2);
+  EXPECT_EQ(imag.size(), 2);
+  ExpectClose(real(0), 42.42, kEpsilon);
+  ExpectClose(real(1), 42.42, kEpsilon);
+  ExpectClose(std::abs(imag(0)), 4.2, kEpsilon);
+  ExpectClose(std::abs(imag(1)), 4.2, kEpsilon);
+  ExpectClose(std::abs(imag(0) + imag(1)), 0.0, kEpsilon);
+}
+
+TEST(Polynomial, QuarticPolynomialWorks) {
+  const double roots[4] = { 1.23e-4, 1.23e-1, 1.23e+2, 1.23e+5 };
+  RunPolynomialTestRealRoots(roots, true, true, kEpsilon);
+}
+
+TEST(Polynomial, QuarticPolynomialWithTwoClustersOfCloseRootsWorks) {
+  const double roots[4] = { 1.23e-1, 2.46e-1, 1.23e+5, 2.46e+5 };
+  RunPolynomialTestRealRoots(roots, true, true, kEpsilonLoose);
+}
+
+TEST(Polynomial, QuarticPolynomialWithTwoZeroRootsWorks) {
+  const double roots[4] = { -42.42, 0.0, 0.0, 42.42 };
+  RunPolynomialTestRealRoots(roots, true, true, 2 * kEpsilonLoose);
+}
+
+TEST(Polynomial, QuarticMonomialWorks) {
+  const double roots[4] = { 0.0, 0.0, 0.0, 0.0 };
+  RunPolynomialTestRealRoots(roots, true, true, kEpsilon);
+}
+
+TEST(Polynomial, NullPointerAsImaginaryPartWorks) {
+  const double roots[4] = { 1.23e-4, 1.23e-1, 1.23e+2, 1.23e+5 };
+  RunPolynomialTestRealRoots(roots, true, false, kEpsilon);
+}
+
+TEST(Polynomial, NullPointerAsRealPartWorks) {
+  const double roots[4] = { 1.23e-4, 1.23e-1, 1.23e+2, 1.23e+5 };
+  RunPolynomialTestRealRoots(roots, false, true, kEpsilon);
+}
+
+TEST(Polynomial, BothOutputArgumentsNullWorks) {
+  const double roots[4] = { 1.23e-4, 1.23e-1, 1.23e+2, 1.23e+5 };
+  RunPolynomialTestRealRoots(roots, false, false, kEpsilon);
+}
+
+TEST(Polynomial, DifferentiateConstantPolynomial) {
+  // p(x) = 1;
+  Vector polynomial(1);
+  polynomial(0) = 1.0;
+  const Vector derivative = DifferentiatePolynomial(polynomial);
+  EXPECT_EQ(derivative.rows(), 1);
+  EXPECT_EQ(derivative(0), 0);
+}
+
+TEST(Polynomial, DifferentiateQuadraticPolynomial) {
+  // p(x) = x^2 + 2x + 3;
+  Vector polynomial(3);
+  polynomial(0) = 1.0;
+  polynomial(1) = 2.0;
+  polynomial(2) = 3.0;
+
+  const Vector derivative = DifferentiatePolynomial(polynomial);
+  EXPECT_EQ(derivative.rows(), 2);
+  EXPECT_EQ(derivative(0), 2.0);
+  EXPECT_EQ(derivative(1), 2.0);
+}
+
+TEST(Polynomial, MinimizeConstantPolynomial) {
+  // p(x) = 1;
+  Vector polynomial(1);
+  polynomial(0) = 1.0;
+
+  double optimal_x = 0.0;
+  double optimal_value = 0.0;
+  double min_x = 0.0;
+  double max_x = 1.0;
+  MinimizePolynomial(polynomial, min_x, max_x, &optimal_x, &optimal_value);
+
+  EXPECT_EQ(optimal_value, 1.0);
+  EXPECT_LE(optimal_x, max_x);
+  EXPECT_GE(optimal_x, min_x);
+}
+
+TEST(Polynomial, MinimizeLinearPolynomial) {
+  // p(x) = x - 2
+  Vector polynomial(2);
+
+  polynomial(0) = 1.0;
+  polynomial(1) = 2.0;
+
+  double optimal_x = 0.0;
+  double optimal_value = 0.0;
+  double min_x = 0.0;
+  double max_x = 1.0;
+  MinimizePolynomial(polynomial, min_x, max_x, &optimal_x, &optimal_value);
+
+  EXPECT_EQ(optimal_x, 0.0);
+  EXPECT_EQ(optimal_value, 2.0);
+}
+
+
+TEST(Polynomial, MinimizeQuadraticPolynomial) {
+  // p(x) = x^2 - 3 x + 2
+  // min_x = 3/2
+  // min_value = -1/4;
+  Vector polynomial(3);
+  polynomial(0) = 1.0;
+  polynomial(1) = -3.0;
+  polynomial(2) = 2.0;
+
+  double optimal_x = 0.0;
+  double optimal_value = 0.0;
+  double min_x = -2.0;
+  double max_x = 2.0;
+  MinimizePolynomial(polynomial, min_x, max_x, &optimal_x, &optimal_value);
+  EXPECT_EQ(optimal_x, 3.0/2.0);
+  EXPECT_EQ(optimal_value, -1.0/4.0);
+
+  min_x = -2.0;
+  max_x = 1.0;
+  MinimizePolynomial(polynomial, min_x, max_x, &optimal_x, &optimal_value);
+  EXPECT_EQ(optimal_x, 1.0);
+  EXPECT_EQ(optimal_value, 0.0);
+
+  min_x = 2.0;
+  max_x = 3.0;
+  MinimizePolynomial(polynomial, min_x, max_x, &optimal_x, &optimal_value);
+  EXPECT_EQ(optimal_x, 2.0);
+  EXPECT_EQ(optimal_value, 0.0);
+}
+
+TEST(Polynomial, ConstantInterpolatingPolynomial) {
+  // p(x) = 1.0
+  Vector true_polynomial(1);
+  true_polynomial << 1.0;
+
+  vector<FunctionSample> samples;
+  FunctionSample sample;
+  sample.x = 1.0;
+  sample.value = 1.0;
+  sample.value_is_valid = true;
+  samples.push_back(sample);
+
+  const Vector polynomial = FindInterpolatingPolynomial(samples);
+  EXPECT_NEAR((true_polynomial - polynomial).norm(), 0.0, 1e-15);
+}
+
+TEST(Polynomial, LinearInterpolatingPolynomial) {
+  // p(x) = 2x - 1
+  Vector true_polynomial(2);
+  true_polynomial << 2.0, -1.0;
+
+  vector<FunctionSample> samples;
+  FunctionSample sample;
+  sample.x = 1.0;
+  sample.value = 1.0;
+  sample.value_is_valid = true;
+  sample.gradient = 2.0;
+  sample.gradient_is_valid = true;
+  samples.push_back(sample);
+
+  const Vector polynomial = FindInterpolatingPolynomial(samples);
+  EXPECT_NEAR((true_polynomial - polynomial).norm(), 0.0, 1e-15);
+}
+
+TEST(Polynomial, QuadraticInterpolatingPolynomial) {
+  // p(x) = 2x^2 + 3x + 2
+  Vector true_polynomial(3);
+  true_polynomial << 2.0, 3.0, 2.0;
+
+  vector<FunctionSample> samples;
+  {
+    FunctionSample sample;
+    sample.x = 1.0;
+    sample.value = 7.0;
+    sample.value_is_valid = true;
+    sample.gradient = 7.0;
+    sample.gradient_is_valid = true;
+    samples.push_back(sample);
+  }
+
+  {
+    FunctionSample sample;
+    sample.x = -3.0;
+    sample.value = 11.0;
+    sample.value_is_valid = true;
+    samples.push_back(sample);
+  }
+
+  Vector polynomial = FindInterpolatingPolynomial(samples);
+  EXPECT_NEAR((true_polynomial - polynomial).norm(), 0.0, 1e-15);
+}
+
+TEST(Polynomial, DeficientCubicInterpolatingPolynomial) {
+  // p(x) = 2x^2 + 3x + 2
+  Vector true_polynomial(4);
+  true_polynomial << 0.0, 2.0, 3.0, 2.0;
+
+  vector<FunctionSample> samples;
+  {
+    FunctionSample sample;
+    sample.x = 1.0;
+    sample.value = 7.0;
+    sample.value_is_valid = true;
+    sample.gradient = 7.0;
+    sample.gradient_is_valid = true;
+    samples.push_back(sample);
+  }
+
+  {
+    FunctionSample sample;
+    sample.x = -3.0;
+    sample.value = 11.0;
+    sample.value_is_valid = true;
+    sample.gradient = -9;
+    sample.gradient_is_valid = true;
+    samples.push_back(sample);
+  }
+
+  const Vector polynomial = FindInterpolatingPolynomial(samples);
+  EXPECT_NEAR((true_polynomial - polynomial).norm(), 0.0, 1e-14);
+}
+
+
+TEST(Polynomial, CubicInterpolatingPolynomialFromValues) {
+  // p(x) = x^3 + 2x^2 + 3x + 2
+  Vector true_polynomial(4);
+  true_polynomial << 1.0, 2.0, 3.0, 2.0;
+
+  vector<FunctionSample> samples;
+  {
+    FunctionSample sample;
+    sample.x = 1.0;
+    sample.value = EvaluatePolynomial(true_polynomial, sample.x);
+    sample.value_is_valid = true;
+    samples.push_back(sample);
+  }
+
+  {
+    FunctionSample sample;
+    sample.x = -3.0;
+    sample.value = EvaluatePolynomial(true_polynomial, sample.x);
+    sample.value_is_valid = true;
+    samples.push_back(sample);
+  }
+
+  {
+    FunctionSample sample;
+    sample.x = 2.0;
+    sample.value = EvaluatePolynomial(true_polynomial, sample.x);
+    sample.value_is_valid = true;
+    samples.push_back(sample);
+  }
+
+  {
+    FunctionSample sample;
+    sample.x = 0.0;
+    sample.value = EvaluatePolynomial(true_polynomial, sample.x);
+    sample.value_is_valid = true;
+    samples.push_back(sample);
+  }
+
+  const Vector polynomial = FindInterpolatingPolynomial(samples);
+  EXPECT_NEAR((true_polynomial - polynomial).norm(), 0.0, 1e-14);
+}
+
+TEST(Polynomial, CubicInterpolatingPolynomialFromValuesAndOneGradient) {
+  // p(x) = x^3 + 2x^2 + 3x + 2
+  Vector true_polynomial(4);
+  true_polynomial << 1.0, 2.0, 3.0, 2.0;
+  Vector true_gradient_polynomial = DifferentiatePolynomial(true_polynomial);
+
+  vector<FunctionSample> samples;
+  {
+    FunctionSample sample;
+    sample.x = 1.0;
+    sample.value = EvaluatePolynomial(true_polynomial, sample.x);
+    sample.value_is_valid = true;
+    samples.push_back(sample);
+  }
+
+  {
+    FunctionSample sample;
+    sample.x = -3.0;
+    sample.value = EvaluatePolynomial(true_polynomial, sample.x);
+    sample.value_is_valid = true;
+    samples.push_back(sample);
+  }
+
+  {
+    FunctionSample sample;
+    sample.x = 2.0;
+    sample.value = EvaluatePolynomial(true_polynomial, sample.x);
+    sample.value_is_valid = true;
+    sample.gradient = EvaluatePolynomial(true_gradient_polynomial, sample.x);
+    sample.gradient_is_valid = true;
+    samples.push_back(sample);
+  }
+
+  const Vector polynomial = FindInterpolatingPolynomial(samples);
+  EXPECT_NEAR((true_polynomial - polynomial).norm(), 0.0, 1e-14);
+}
+
+TEST(Polynomial, CubicInterpolatingPolynomialFromValuesAndGradients) {
+  // p(x) = x^3 + 2x^2 + 3x + 2
+  Vector true_polynomial(4);
+  true_polynomial << 1.0, 2.0, 3.0, 2.0;
+  Vector true_gradient_polynomial = DifferentiatePolynomial(true_polynomial);
+
+  vector<FunctionSample> samples;
+  {
+    FunctionSample sample;
+    sample.x = -3.0;
+    sample.value = EvaluatePolynomial(true_polynomial, sample.x);
+    sample.value_is_valid = true;
+    sample.gradient = EvaluatePolynomial(true_gradient_polynomial, sample.x);
+    sample.gradient_is_valid = true;
+    samples.push_back(sample);
+  }
+
+  {
+    FunctionSample sample;
+    sample.x = 2.0;
+    sample.value = EvaluatePolynomial(true_polynomial, sample.x);
+    sample.value_is_valid = true;
+    sample.gradient = EvaluatePolynomial(true_gradient_polynomial, sample.x);
+    sample.gradient_is_valid = true;
+    samples.push_back(sample);
+  }
+
+  const Vector polynomial = FindInterpolatingPolynomial(samples);
+  EXPECT_NEAR((true_polynomial - polynomial).norm(), 0.0, 1e-14);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/preconditioner.cc b/internal/ceres/preconditioner.cc
new file mode 100644
index 0000000..f98374e
--- /dev/null
+++ b/internal/ceres/preconditioner.cc
@@ -0,0 +1,74 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/preconditioner.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+Preconditioner::~Preconditioner() {
+}
+
+PreconditionerType Preconditioner::PreconditionerForZeroEBlocks(
+    PreconditionerType preconditioner_type) {
+  if (preconditioner_type == SCHUR_JACOBI ||
+      preconditioner_type == CLUSTER_JACOBI ||
+      preconditioner_type == CLUSTER_TRIDIAGONAL) {
+    return JACOBI;
+  }
+  return preconditioner_type;
+}
+
+SparseMatrixPreconditionerWrapper::SparseMatrixPreconditionerWrapper(
+    const SparseMatrix* matrix)
+    : matrix_(matrix) {
+  CHECK(matrix != nullptr);
+}
+
+SparseMatrixPreconditionerWrapper::~SparseMatrixPreconditionerWrapper() {
+}
+
+bool SparseMatrixPreconditionerWrapper::UpdateImpl(const SparseMatrix& A,
+                                                   const double* D) {
+  return true;
+}
+
+void SparseMatrixPreconditionerWrapper::RightMultiply(const double* x,
+                                                      double* y) const {
+  matrix_->RightMultiply(x, y);
+}
+
+int  SparseMatrixPreconditionerWrapper::num_rows() const {
+  return matrix_->num_rows();
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/preconditioner.h b/internal/ceres/preconditioner.h
new file mode 100644
index 0000000..476697d
--- /dev/null
+++ b/internal/ceres/preconditioner.h
@@ -0,0 +1,185 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_PRECONDITIONER_H_
+#define CERES_INTERNAL_PRECONDITIONER_H_
+
+#include <vector>
+#include "ceres/casts.h"
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/context_impl.h"
+#include "ceres/linear_operator.h"
+#include "ceres/sparse_matrix.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+class BlockSparseMatrix;
+class SparseMatrix;
+
+class Preconditioner : public LinearOperator {
+ public:
+  struct Options {
+    PreconditionerType type = JACOBI;
+    VisibilityClusteringType visibility_clustering_type = CANONICAL_VIEWS;
+    SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type =
+        SUITE_SPARSE;
+
+    // When using the subset preconditioner, all row blocks starting
+    // from this row block are used to construct the preconditioner.
+    //
+    // i.e., the Jacobian matrix A is horizontally partitioned as
+    //
+    // A = [P]
+    //     [Q]
+    //
+    // where P has subset_preconditioner_start_row_block row blocks,
+    // and the preconditioner is the inverse of the matrix Q'Q.
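+    //
+    // As a purely illustrative example (the numbers are made up): if
+    // A has 10 row blocks and subset_preconditioner_start_row_block
+    // is 4, then P consists of row blocks 0..3, Q consists of row
+    // blocks 4..9, and the preconditioner used is inverse(Q'Q).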
+    int subset_preconditioner_start_row_block = -1;
+
+    // See solver.h for information about these flags.
+    bool use_postordering = false;
+
+    // If possible, how many threads the preconditioner can use.
+    int num_threads = 1;
+
+    // Hints about the order in which the parameter blocks should be
+    // eliminated by the linear solver.
+    //
+    // For example if elimination_groups is a vector of size k, then
+    // the linear solver is informed that it should eliminate the
+    // parameter blocks 0 ... elimination_groups[0] - 1 first, and
+    // then elimination_groups[0] ... elimination_groups[1] - 1 and so
+    // on. Within each elimination group, the linear solver is free to
+    // choose how the parameter blocks are ordered. Different linear
+    // solvers have differing requirements on elimination_groups.
+    //
+    // The most common use is for Schur type solvers, where there
+    // should be at least two elimination groups and the first
+    // elimination group must form an independent set in the normal
+    // equations. The first elimination group corresponds to the
+    // num_eliminate_blocks in the Schur type solvers.
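+    //
+    // For example (an illustrative value, not a default): in a bundle
+    // adjustment problem where the first 100 parameter blocks are
+    // points, making those 100 blocks the first elimination group
+    // tells a Schur type solver to eliminate parameter blocks 0..99
+    // before the remaining (camera) blocks.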
+    std::vector<int> elimination_groups;
+
+    // If the block sizes in a BlockSparseMatrix are fixed, then in
+    // some cases the Schur complement based solvers can detect and
+    // specialize on them.
+    //
+    // It is expected that these parameters are set programmatically
+    // rather than manually.
+    //
+    // Please see schur_complement_solver.h and schur_eliminator.h for
+    // more details.
+    int row_block_size = Eigen::Dynamic;
+    int e_block_size = Eigen::Dynamic;
+    int f_block_size = Eigen::Dynamic;
+
+    ContextImpl* context = nullptr;
+  };
+
+  // If the optimization problem is such that there are no remaining
+  // e-blocks, ITERATIVE_SCHUR with a Schur type preconditioner cannot
+  // be used. For such problems, this function maps the Schur type
+  // preconditioners (SCHUR_JACOBI, CLUSTER_JACOBI and
+  // CLUSTER_TRIDIAGONAL) to JACOBI; any other preconditioner_type is
+  // returned unchanged.
+  static PreconditionerType PreconditionerForZeroEBlocks(
+      PreconditionerType preconditioner_type);
+
+  virtual ~Preconditioner();
+
+  // Update the numerical value of the preconditioner for the linear
+  // system:
+  //
+  //  |   A   | x = |b|
+  //  |diag(D)|     |0|
+  //
+  // for some vector b. It is important that the matrix A have the
+  // same block structure as the one used to construct this object.
+  //
+  // D can be NULL, in which case it is interpreted as a diagonal matrix
+  // of size zero.
+  virtual bool Update(const LinearOperator& A, const double* D) = 0;
+
+  // LinearOperator interface. Since the operator is symmetric,
+  // LeftMultiply and num_cols are just calls to RightMultiply and
+  // num_rows respectively. Update() must be called before
+  // RightMultiply can be called.
+  virtual void RightMultiply(const double* x, double* y) const = 0;
+  virtual void LeftMultiply(const double* x, double* y) const {
+    return RightMultiply(x, y);
+  }
+
+  virtual int num_rows() const = 0;
+  virtual int num_cols() const {
+    return num_rows();
+  }
+};
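+
+// A minimal usage sketch of the interface above. The concrete
+// preconditioner instance, the operator A and the vectors x and y are
+// assumed to exist and be size compatible; none of the names below
+// are library symbols.
+//
+//   Preconditioner* preconditioner = ...;  // Some concrete subclass.
+//   CHECK(preconditioner->Update(A, /* D = */ nullptr));
+//   // y <- M^{-1} x, where y has preconditioner->num_rows() entries.
+//   preconditioner->RightMultiply(x, y);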
+
+// This templated subclass of Preconditioner serves as a base class for
+// other preconditioners that depend on the particular matrix layout of
+// the underlying linear operator.
+template <typename MatrixType>
+class TypedPreconditioner : public Preconditioner {
+ public:
+  virtual ~TypedPreconditioner() {}
+  virtual bool Update(const LinearOperator& A, const double* D) {
+    return UpdateImpl(*down_cast<const MatrixType*>(&A), D);
+  }
+
+ private:
+  virtual bool UpdateImpl(const MatrixType& A, const double* D) = 0;
+};
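+
+// As an illustration of the pattern above (a hypothetical subclass,
+// not part of the library): a preconditioner that requires a
+// BlockSparseMatrix would derive from BlockSparseMatrixPreconditioner
+// (a typedef defined below) and implement only
+//
+//   bool UpdateImpl(const BlockSparseMatrix& A, const double* D);
+//
+// Preconditioner::Update then down_casts the LinearOperator to the
+// expected matrix type before forwarding to UpdateImpl.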
+
+// Preconditioners that depend on access to the low level structure
+// of a SparseMatrix.
+typedef TypedPreconditioner<SparseMatrix>              SparseMatrixPreconditioner;               // NOLINT
+typedef TypedPreconditioner<BlockSparseMatrix>         BlockSparseMatrixPreconditioner;          // NOLINT
+typedef TypedPreconditioner<CompressedRowSparseMatrix> CompressedRowSparseMatrixPreconditioner;  // NOLINT
+
+// Wrap a SparseMatrix object as a preconditioner.
+class SparseMatrixPreconditionerWrapper : public SparseMatrixPreconditioner {
+ public:
+  // Wrapper does NOT take ownership of the matrix pointer.
+  explicit SparseMatrixPreconditionerWrapper(const SparseMatrix* matrix);
+  virtual ~SparseMatrixPreconditionerWrapper();
+
+  // Preconditioner interface
+  virtual void RightMultiply(const double* x, double* y) const;
+  virtual int num_rows() const;
+
+ private:
+  virtual bool UpdateImpl(const SparseMatrix& A, const double* D);
+  const SparseMatrix* matrix_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_PRECONDITIONER_H_
diff --git a/internal/ceres/preprocessor.cc b/internal/ceres/preprocessor.cc
new file mode 100644
index 0000000..0221914
--- /dev/null
+++ b/internal/ceres/preprocessor.cc
@@ -0,0 +1,104 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/callbacks.h"
+#include "ceres/gradient_checking_cost_function.h"
+#include "ceres/line_search_preprocessor.h"
+#include "ceres/parallel_for.h"
+#include "ceres/preprocessor.h"
+#include "ceres/problem_impl.h"
+#include "ceres/solver.h"
+#include "ceres/trust_region_preprocessor.h"
+
+namespace ceres {
+namespace internal {
+
+Preprocessor* Preprocessor::Create(MinimizerType minimizer_type) {
+  if (minimizer_type == TRUST_REGION) {
+    return new TrustRegionPreprocessor;
+  }
+
+  if (minimizer_type == LINE_SEARCH) {
+    return new LineSearchPreprocessor;
+  }
+
+  LOG(FATAL) << "Unknown minimizer_type: " << minimizer_type;
+  return NULL;
+}
+
+Preprocessor::~Preprocessor() {
+}
+
+void ChangeNumThreadsIfNeeded(Solver::Options* options) {
+  const int num_threads_available = MaxNumThreadsAvailable();
+  if (options->num_threads > num_threads_available) {
+    LOG(WARNING)
+        << "Specified options.num_threads: " << options->num_threads
+        << " exceeds maximum available from the threading model Ceres "
+        << "was compiled with: " << num_threads_available
+        << ".  Bounding to maximum number available.";
+    options->num_threads = num_threads_available;
+  }
+}
+
+void SetupCommonMinimizerOptions(PreprocessedProblem* pp) {
+  const Solver::Options& options = pp->options;
+  Program* program = pp->reduced_program.get();
+
+  // Assuming that the parameter blocks in the program have been
+  // reordered as needed, extract them into a contiguous vector.
+  pp->reduced_parameters.resize(program->NumParameters());
+  double* reduced_parameters = pp->reduced_parameters.data();
+  program->ParameterBlocksToStateVector(reduced_parameters);
+
+  Minimizer::Options& minimizer_options = pp->minimizer_options;
+  minimizer_options = Minimizer::Options(options);
+  minimizer_options.evaluator = pp->evaluator;
+
+  if (options.logging_type != SILENT) {
+    pp->logging_callback.reset(
+        new LoggingCallback(options.minimizer_type,
+                            options.minimizer_progress_to_stdout));
+    minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(),
+                                       pp->logging_callback.get());
+  }
+
+  if (options.update_state_every_iteration) {
+    pp->state_updating_callback.reset(
+      new StateUpdatingCallback(program, reduced_parameters));
+    // This must get pushed to the front of the callbacks so that it
+    // is run before any of the user callbacks.
+    minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(),
+                                       pp->state_updating_callback.get());
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/preprocessor.h b/internal/ceres/preprocessor.h
new file mode 100644
index 0000000..99bd6c0
--- /dev/null
+++ b/internal/ceres/preprocessor.h
@@ -0,0 +1,123 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_PREPROCESSOR_H_
+#define CERES_INTERNAL_PREPROCESSOR_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "ceres/coordinate_descent_minimizer.h"
+#include "ceres/evaluator.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/port.h"
+#include "ceres/iteration_callback.h"
+#include "ceres/linear_solver.h"
+#include "ceres/minimizer.h"
+#include "ceres/problem_impl.h"
+#include "ceres/program.h"
+#include "ceres/solver.h"
+
+namespace ceres {
+namespace internal {
+
+struct PreprocessedProblem;
+
+// Given a Problem object and a Solver::Options object indicating the
+// configuration of the solver, the job of the Preprocessor is to
+// analyze the Problem and perform the setup needed to solve it using
+// the desired Minimization algorithm. The setup involves removing
+// redundancies in the input problem (inactive parameter and residual
+// blocks), finding fill reducing orderings as needed, configuring and
+// creating various objects needed by the Minimizer to solve the
+// problem such as an evaluator, a linear solver etc.
+//
+// Each Minimizer (LineSearchMinimizer and TrustRegionMinimizer) comes
+// with a corresponding Preprocessor (LineSearchPreprocessor and
+// TrustRegionPreprocessor) that knows about its needs and performs
+// the preprocessing needed.
+//
+// The output of the Preprocessor is stored in a PreprocessedProblem
+// object.
+class Preprocessor {
+ public:
+  // Factory.
+  static Preprocessor* Create(MinimizerType minimizer_type);
+  virtual ~Preprocessor();
+  virtual bool Preprocess(const Solver::Options& options,
+                          ProblemImpl* problem,
+                          PreprocessedProblem* pp) = 0;
+};
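+
+// A minimal sketch of how the factory and interface above are driven
+// (error handling elided; options and problem_impl are assumed to be
+// a configured Solver::Options and the ProblemImpl to solve):
+//
+//   std::unique_ptr<Preprocessor> preprocessor(
+//       Preprocessor::Create(options.minimizer_type));
+//   PreprocessedProblem pp;
+//   if (!preprocessor->Preprocess(options, problem_impl, &pp)) {
+//     // pp.error describes why preprocessing failed.
+//   }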
+
+// A PreprocessedProblem is the result of running the Preprocessor on
+// a Problem and Solver::Options object.
+struct PreprocessedProblem {
+  PreprocessedProblem()
+      : problem(nullptr),
+        fixed_cost(0.0) {
+  }
+
+  std::string error;
+  Solver::Options options;
+  LinearSolver::Options linear_solver_options;
+  Evaluator::Options evaluator_options;
+  Minimizer::Options minimizer_options;
+
+  ProblemImpl* problem;
+  std::unique_ptr<ProblemImpl> gradient_checking_problem;
+  std::unique_ptr<Program> reduced_program;
+  std::unique_ptr<LinearSolver> linear_solver;
+  std::unique_ptr<IterationCallback> logging_callback;
+  std::unique_ptr<IterationCallback> state_updating_callback;
+
+  std::shared_ptr<Evaluator> evaluator;
+  std::shared_ptr<CoordinateDescentMinimizer> inner_iteration_minimizer;
+
+  std::vector<double*> removed_parameter_blocks;
+  Vector reduced_parameters;
+  double fixed_cost;
+};
+
+// Common functions used by various preprocessors.
+
+// If the user has specified a value of num_threads greater than the
+// maximum number of threads available from the compiled threading
+// model, bound the number of threads to that maximum.
+void ChangeNumThreadsIfNeeded(Solver::Options* options);
+
+// Extract the effective parameter vector from the preprocessed
+// problem and setup bits of the Minimizer::Options object that are
+// common to all Preprocessors.
+void SetupCommonMinimizerOptions(PreprocessedProblem* pp);
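+//
+// For instance, a concrete Preprocess() implementation would
+// typically call these helpers in this order (a sketch assuming the
+// reduced program and evaluator in pp have already been created):
+//
+//   ChangeNumThreadsIfNeeded(&pp->options);
+//   ...  // Build pp->reduced_program, pp->evaluator, etc.
+//   SetupCommonMinimizerOptions(pp);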
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_PREPROCESSOR_H_
diff --git a/internal/ceres/problem.cc b/internal/ceres/problem.cc
new file mode 100644
index 0000000..6939b46
--- /dev/null
+++ b/internal/ceres/problem.cc
@@ -0,0 +1,203 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//         keir@google.com (Keir Mierle)
+
+#include "ceres/problem.h"
+
+#include <vector>
+#include "ceres/crs_matrix.h"
+#include "ceres/problem_impl.h"
+
+namespace ceres {
+
+using std::vector;
+
+Problem::Problem() : problem_impl_(new internal::ProblemImpl) {}
+Problem::Problem(const Problem::Options& options)
+    : problem_impl_(new internal::ProblemImpl(options)) {}
+Problem::~Problem() {}
+
+ResidualBlockId Problem::AddResidualBlock(
+    CostFunction* cost_function,
+    LossFunction* loss_function,
+    const vector<double*>& parameter_blocks) {
+  return problem_impl_->AddResidualBlock(
+      cost_function,
+      loss_function,
+      parameter_blocks.data(),
+      static_cast<int>(parameter_blocks.size()));
+}
+
+ResidualBlockId Problem::AddResidualBlock(
+    CostFunction* cost_function,
+    LossFunction* loss_function,
+    double* const* const parameter_blocks,
+    int num_parameter_blocks) {
+  return problem_impl_->AddResidualBlock(cost_function,
+                                         loss_function,
+                                         parameter_blocks,
+                                         num_parameter_blocks);
+}
+
+void Problem::AddParameterBlock(double* values, int size) {
+  problem_impl_->AddParameterBlock(values, size);
+}
+
+void Problem::AddParameterBlock(double* values,
+                                int size,
+                                LocalParameterization* local_parameterization) {
+  problem_impl_->AddParameterBlock(values, size, local_parameterization);
+}
+
+void Problem::RemoveResidualBlock(ResidualBlockId residual_block) {
+  problem_impl_->RemoveResidualBlock(residual_block);
+}
+
+void Problem::RemoveParameterBlock(double* values) {
+  problem_impl_->RemoveParameterBlock(values);
+}
+
+void Problem::SetParameterBlockConstant(double* values) {
+  problem_impl_->SetParameterBlockConstant(values);
+}
+
+void Problem::SetParameterBlockVariable(double* values) {
+  problem_impl_->SetParameterBlockVariable(values);
+}
+
+bool Problem::IsParameterBlockConstant(double* values) const {
+  return problem_impl_->IsParameterBlockConstant(values);
+}
+
+void Problem::SetParameterization(
+    double* values,
+    LocalParameterization* local_parameterization) {
+  problem_impl_->SetParameterization(values, local_parameterization);
+}
+
+const LocalParameterization* Problem::GetParameterization(
+    double* values) const {
+  return problem_impl_->GetParameterization(values);
+}
+
+void Problem::SetParameterLowerBound(double* values,
+                                     int index,
+                                     double lower_bound) {
+  problem_impl_->SetParameterLowerBound(values, index, lower_bound);
+}
+
+void Problem::SetParameterUpperBound(double* values,
+                                     int index,
+                                     double upper_bound) {
+  problem_impl_->SetParameterUpperBound(values, index, upper_bound);
+}
+
+double Problem::GetParameterUpperBound(double* values, int index) const {
+  return problem_impl_->GetParameterUpperBound(values, index);
+}
+
+double Problem::GetParameterLowerBound(double* values, int index) const {
+  return problem_impl_->GetParameterLowerBound(values, index);
+}
+
+bool Problem::Evaluate(const EvaluateOptions& evaluate_options,
+                       double* cost,
+                       vector<double>* residuals,
+                       vector<double>* gradient,
+                       CRSMatrix* jacobian) {
+  return problem_impl_->Evaluate(evaluate_options,
+                                 cost,
+                                 residuals,
+                                 gradient,
+                                 jacobian);
+}
+
+int Problem::NumParameterBlocks() const {
+  return problem_impl_->NumParameterBlocks();
+}
+
+int Problem::NumParameters() const {
+  return problem_impl_->NumParameters();
+}
+
+int Problem::NumResidualBlocks() const {
+  return problem_impl_->NumResidualBlocks();
+}
+
+int Problem::NumResiduals() const {
+  return problem_impl_->NumResiduals();
+}
+
+int Problem::ParameterBlockSize(const double* parameter_block) const {
+  return problem_impl_->ParameterBlockSize(parameter_block);
+}
+
+int Problem::ParameterBlockLocalSize(const double* parameter_block) const {
+  return problem_impl_->ParameterBlockLocalSize(parameter_block);
+}
+
+bool Problem::HasParameterBlock(const double* values) const {
+  return problem_impl_->HasParameterBlock(values);
+}
+
+void Problem::GetParameterBlocks(vector<double*>* parameter_blocks) const {
+  problem_impl_->GetParameterBlocks(parameter_blocks);
+}
+
+void Problem::GetResidualBlocks(
+    vector<ResidualBlockId>* residual_blocks) const {
+  problem_impl_->GetResidualBlocks(residual_blocks);
+}
+
+void Problem::GetParameterBlocksForResidualBlock(
+    const ResidualBlockId residual_block,
+    vector<double*>* parameter_blocks) const {
+  problem_impl_->GetParameterBlocksForResidualBlock(residual_block,
+                                                    parameter_blocks);
+}
+
+const CostFunction* Problem::GetCostFunctionForResidualBlock(
+    const ResidualBlockId residual_block) const {
+  return problem_impl_->GetCostFunctionForResidualBlock(residual_block);
+}
+
+const LossFunction* Problem::GetLossFunctionForResidualBlock(
+    const ResidualBlockId residual_block) const {
+  return problem_impl_->GetLossFunctionForResidualBlock(residual_block);
+}
+
+void Problem::GetResidualBlocksForParameterBlock(
+    const double* values,
+    vector<ResidualBlockId>* residual_blocks) const {
+  problem_impl_->GetResidualBlocksForParameterBlock(values,
+                                                    residual_blocks);
+}
+
+}  // namespace ceres
diff --git a/internal/ceres/problem_impl.cc b/internal/ceres/problem_impl.cc
new file mode 100644
index 0000000..40d5aa2
--- /dev/null
+++ b/internal/ceres/problem_impl.cc
@@ -0,0 +1,897 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//         mierle@gmail.com (Keir Mierle)
+
+#include "ceres/problem_impl.h"
+
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
+#include <memory>
+#include <set>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "ceres/casts.h"
+#include "ceres/compressed_row_jacobian_writer.h"
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/context_impl.h"
+#include "ceres/cost_function.h"
+#include "ceres/crs_matrix.h"
+#include "ceres/evaluator.h"
+#include "ceres/internal/port.h"
+#include "ceres/loss_function.h"
+#include "ceres/map_util.h"
+#include "ceres/parameter_block.h"
+#include "ceres/program.h"
+#include "ceres/program_evaluator.h"
+#include "ceres/residual_block.h"
+#include "ceres/scratch_evaluate_preparer.h"
+#include "ceres/stl_util.h"
+#include "ceres/stringprintf.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+using std::map;
+using std::string;
+using std::vector;
+
+namespace {
+// Returns true if two regions of memory, a and b, with sizes size_a and size_b
+// respectively, overlap.
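+//
+// For example, with size_a == 4 and size_b == 4, blocks starting at x
+// and x + 2 alias (the ranges [x, x + 4) and [x + 2, x + 6) overlap),
+// while blocks starting at x and x + 4 do not.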
+bool RegionsAlias(const double* a, int size_a,
+                  const double* b, int size_b) {
+  return (a < b) ? b < (a + size_a)
+                 : a < (b + size_b);
+}
+
+void CheckForNoAliasing(double* existing_block,
+                        int existing_block_size,
+                        double* new_block,
+                        int new_block_size) {
+  CHECK(!RegionsAlias(existing_block, existing_block_size,
+                      new_block, new_block_size))
+      << "Aliasing detected between existing parameter block at memory "
+      << "location " << existing_block
+      << " and has size " << existing_block_size << " with new parameter "
+      << "block that has memory address " << new_block << " and would have "
+      << "size " << new_block_size << ".";
+}
+
+template <typename KeyType>
+void DecrementValueOrDeleteKey(const KeyType key,
+                               std::map<KeyType, int>* container) {
+  auto it = container->find(key);
+  if (it->second == 1) {
+    delete key;
+    container->erase(it);
+  } else {
+    --it->second;
+  }
+}
+
+template <typename ForwardIterator>
+void STLDeleteContainerPairFirstPointers(ForwardIterator begin,
+                                         ForwardIterator end) {
+  while (begin != end) {
+    delete begin->first;
+    ++begin;
+  }
+}
+
+void InitializeContext(Context* context,
+                       ContextImpl** context_impl,
+                       bool* context_impl_owned) {
+  if (context == NULL) {
+    *context_impl_owned = true;
+    *context_impl = new ContextImpl;
+  } else {
+    *context_impl_owned = false;
+    *context_impl = down_cast<ContextImpl*>(context);
+  }
+}
+
+}  // namespace
+
+ParameterBlock* ProblemImpl::InternalAddParameterBlock(double* values,
+                                                       int size) {
+  CHECK(values != NULL) << "Null pointer passed to AddParameterBlock "
+                        << "for a parameter with size " << size;
+
+  // Ignore the request if there is a block for the given pointer already.
+  ParameterMap::iterator it = parameter_block_map_.find(values);
+  if (it != parameter_block_map_.end()) {
+    if (!options_.disable_all_safety_checks) {
+      int existing_size = it->second->Size();
+      CHECK(size == existing_size)
+          << "Tried adding a parameter block with the same double pointer, "
+          << values << ", twice, but with different block sizes. Original "
+          << "size was " << existing_size << " but new size is "
+          << size;
+    }
+    return it->second;
+  }
+
+  if (!options_.disable_all_safety_checks) {
+    // Before adding the parameter block, also check that it doesn't alias any
+    // other parameter blocks.
+    if (!parameter_block_map_.empty()) {
+      ParameterMap::iterator lb = parameter_block_map_.lower_bound(values);
+
+      // If lb is not the first block, check the previous block for aliasing.
+      if (lb != parameter_block_map_.begin()) {
+        ParameterMap::iterator previous = lb;
+        --previous;
+        CheckForNoAliasing(previous->first,
+                           previous->second->Size(),
+                           values,
+                           size);
+      }
+
+      // If lb is not off the end, check lb for aliasing.
+      if (lb != parameter_block_map_.end()) {
+        CheckForNoAliasing(lb->first,
+                           lb->second->Size(),
+                           values,
+                           size);
+      }
+    }
+  }
+
+  // Pass the index of the new parameter block as well to keep the index in
+  // sync with the position of the parameter in the program's parameter vector.
+  ParameterBlock* new_parameter_block =
+      new ParameterBlock(values, size, program_->parameter_blocks_.size());
+
+  // For dynamic problems, add the list of dependent residual blocks, which is
+  // empty to start.
+  if (options_.enable_fast_removal) {
+    new_parameter_block->EnableResidualBlockDependencies();
+  }
+  parameter_block_map_[values] = new_parameter_block;
+  program_->parameter_blocks_.push_back(new_parameter_block);
+  return new_parameter_block;
+}
+
+void ProblemImpl::InternalRemoveResidualBlock(ResidualBlock* residual_block) {
+  CHECK(residual_block != nullptr);
+  // Perform no check on the validity of residual_block, that is handled in
+  // the public method: RemoveResidualBlock().
+
+  // If needed, remove the parameter dependencies on this residual block.
+  if (options_.enable_fast_removal) {
+    const int num_parameter_blocks_for_residual =
+        residual_block->NumParameterBlocks();
+    for (int i = 0; i < num_parameter_blocks_for_residual; ++i) {
+      residual_block->parameter_blocks()[i]
+          ->RemoveResidualBlock(residual_block);
+    }
+
+    ResidualBlockSet::iterator it = residual_block_set_.find(residual_block);
+    residual_block_set_.erase(it);
+  }
+  DeleteBlockInVector(program_->mutable_residual_blocks(), residual_block);
+}
+
+// Deletes the residual block in question, assuming there are no other
+// references to it inside the problem (e.g. by another parameter). If the
+// problem owns the associated cost and loss functions, their reference
+// counts are decremented and they are only deleted once no other residual
+// block in the problem refers to them.
+void ProblemImpl::DeleteBlock(ResidualBlock* residual_block) {
+  // The const casts here are legit, since ResidualBlock holds these
+  // pointers as const pointers but we have ownership of them and
+  // have the right to destroy them when the destructor is called.
+  CostFunction* cost_function =
+      const_cast<CostFunction*>(residual_block->cost_function());
+  if (options_.cost_function_ownership == TAKE_OWNERSHIP) {
+    DecrementValueOrDeleteKey(cost_function, &cost_function_ref_count_);
+  }
+
+  LossFunction* loss_function =
+      const_cast<LossFunction*>(residual_block->loss_function());
+  if (options_.loss_function_ownership == TAKE_OWNERSHIP &&
+      loss_function != NULL) {
+    DecrementValueOrDeleteKey(loss_function, &loss_function_ref_count_);
+  }
+
+  delete residual_block;
+}
+
+// Deletes the parameter block in question, assuming there are no other
+// references to it inside the problem (e.g. by any residual blocks).
+// Referenced parameterizations are tucked away for future deletion, since it
+// is not possible to know whether other parts of the problem depend on them
+// without doing a full scan.
+void ProblemImpl::DeleteBlock(ParameterBlock* parameter_block) {
+  if (options_.local_parameterization_ownership == TAKE_OWNERSHIP &&
+      parameter_block->local_parameterization() != NULL) {
+    local_parameterizations_to_delete_.push_back(
+        parameter_block->mutable_local_parameterization());
+  }
+  parameter_block_map_.erase(parameter_block->mutable_user_state());
+  delete parameter_block;
+}
+
+ProblemImpl::ProblemImpl()
+    : options_(Problem::Options()),
+      program_(new internal::Program) {
+  InitializeContext(options_.context, &context_impl_, &context_impl_owned_);
+}
+
+ProblemImpl::ProblemImpl(const Problem::Options& options)
+    : options_(options),
+      program_(new internal::Program) {
+  InitializeContext(options_.context, &context_impl_, &context_impl_owned_);
+}
+
+ProblemImpl::~ProblemImpl() {
+  STLDeleteContainerPointers(program_->residual_blocks_.begin(),
+                             program_->residual_blocks_.end());
+
+  if (options_.cost_function_ownership == TAKE_OWNERSHIP) {
+    STLDeleteContainerPairFirstPointers(cost_function_ref_count_.begin(),
+                                        cost_function_ref_count_.end());
+  }
+
+  if (options_.loss_function_ownership == TAKE_OWNERSHIP) {
+    STLDeleteContainerPairFirstPointers(loss_function_ref_count_.begin(),
+                                        loss_function_ref_count_.end());
+  }
+
+  // Collect the unique parameterizations and delete the parameters.
+  for (int i = 0; i < program_->parameter_blocks_.size(); ++i) {
+    DeleteBlock(program_->parameter_blocks_[i]);
+  }
+
+  // Delete the owned parameterizations.
+  STLDeleteUniqueContainerPointers(local_parameterizations_to_delete_.begin(),
+                                   local_parameterizations_to_delete_.end());
+
+  if (context_impl_owned_) {
+    delete context_impl_;
+  }
+}
+
+ResidualBlockId ProblemImpl::AddResidualBlock(
+      CostFunction* cost_function,
+      LossFunction* loss_function,
+      double* const* const parameter_blocks,
+      int num_parameter_blocks) {
+  CHECK(cost_function != nullptr);
+  CHECK_EQ(num_parameter_blocks,
+           cost_function->parameter_block_sizes().size());
+
+  // Check the sizes match.
+  const vector<int32_t>& parameter_block_sizes =
+      cost_function->parameter_block_sizes();
+
+  if (!options_.disable_all_safety_checks) {
+    CHECK_EQ(parameter_block_sizes.size(), num_parameter_blocks)
+        << "Number of blocks input is different than the number of blocks "
+        << "that the cost function expects.";
+
+    // Check for duplicate parameter blocks.
+    vector<double*> sorted_parameter_blocks(
+        parameter_blocks, parameter_blocks + num_parameter_blocks);
+    sort(sorted_parameter_blocks.begin(), sorted_parameter_blocks.end());
+    const bool has_duplicate_items =
+        (std::adjacent_find(sorted_parameter_blocks.begin(),
+                            sorted_parameter_blocks.end())
+         != sorted_parameter_blocks.end());
+    if (has_duplicate_items) {
+      string blocks;
+      for (int i = 0; i < num_parameter_blocks; ++i) {
+        blocks += StringPrintf(" %p ", parameter_blocks[i]);
+      }
+
+      LOG(FATAL) << "Duplicate parameter blocks in a residual parameter "
+                 << "are not allowed. Parameter block pointers: ["
+                 << blocks << "]";
+    }
+  }
+
+  // Add parameter blocks and convert the double*'s to parameter blocks.
+  vector<ParameterBlock*> parameter_block_ptrs(num_parameter_blocks);
+  for (int i = 0; i < num_parameter_blocks; ++i) {
+    parameter_block_ptrs[i] =
+        InternalAddParameterBlock(parameter_blocks[i],
+                                  parameter_block_sizes[i]);
+  }
+
+  if (!options_.disable_all_safety_checks) {
+    // Check that the block sizes match the block sizes expected by the
+    // cost_function.
+    for (int i = 0; i < parameter_block_ptrs.size(); ++i) {
+      CHECK_EQ(cost_function->parameter_block_sizes()[i],
+               parameter_block_ptrs[i]->Size())
+          << "The cost function expects parameter block " << i
+          << " of size " << cost_function->parameter_block_sizes()[i]
+          << " but was given a block of size "
+          << parameter_block_ptrs[i]->Size();
+    }
+  }
+
+  ResidualBlock* new_residual_block =
+      new ResidualBlock(cost_function,
+                        loss_function,
+                        parameter_block_ptrs,
+                        program_->residual_blocks_.size());
+
+  // Add dependencies on the residual to the parameter blocks.
+  if (options_.enable_fast_removal) {
+    for (int i = 0; i < num_parameter_blocks; ++i) {
+      parameter_block_ptrs[i]->AddResidualBlock(new_residual_block);
+    }
+  }
+
+  program_->residual_blocks_.push_back(new_residual_block);
+
+  if (options_.enable_fast_removal) {
+    residual_block_set_.insert(new_residual_block);
+  }
+
+  if (options_.cost_function_ownership == TAKE_OWNERSHIP) {
+    // Increment the reference count, creating an entry in the table if
+    // needed. Note: C++ maps guarantee that new entries have default
+    // constructed values; this implies integers are zero initialized.
+    ++cost_function_ref_count_[cost_function];
+  }
+
+  if (options_.loss_function_ownership == TAKE_OWNERSHIP &&
+      loss_function != NULL) {
+    ++loss_function_ref_count_[loss_function];
+  }
+
+  return new_residual_block;
+}
+
+void ProblemImpl::AddParameterBlock(double* values, int size) {
+  InternalAddParameterBlock(values, size);
+}
+
+void ProblemImpl::AddParameterBlock(
+    double* values,
+    int size,
+    LocalParameterization* local_parameterization) {
+  ParameterBlock* parameter_block =
+      InternalAddParameterBlock(values, size);
+  if (local_parameterization != NULL) {
+    parameter_block->SetParameterization(local_parameterization);
+  }
+}
+
+// Delete a block from a vector of blocks, maintaining the indexing invariant.
+// This is done in constant time by moving an element from the end of the
+// vector over the element to remove, then popping the last element. It
+// destroys the ordering in the interest of speed.
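+//
+// For example, removing B from [A, B, C, D] moves D into B's slot and
+// shrinks the vector to [A, D, C]; only D's index needs updating.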
+template<typename Block>
+void ProblemImpl::DeleteBlockInVector(vector<Block*>* mutable_blocks,
+                                      Block* block_to_remove) {
+  CHECK_EQ((*mutable_blocks)[block_to_remove->index()], block_to_remove)
+      << "You found a Ceres bug! \n"
+      << "Block requested: "
+      << block_to_remove->ToString() << "\n"
+      << "Block present: "
+      << (*mutable_blocks)[block_to_remove->index()]->ToString();
+
+  // Prepare the to-be-moved block for the new, lower-in-index position by
+  // setting the index to the block's final location.
+  Block* tmp = mutable_blocks->back();
+  tmp->set_index(block_to_remove->index());
+
+  // Overwrite the to-be-deleted residual block with the one at the end.
+  (*mutable_blocks)[block_to_remove->index()] = tmp;
+
+  DeleteBlock(block_to_remove);
+
+  // The block is gone so shrink the vector of blocks accordingly.
+  mutable_blocks->pop_back();
+}
+
+void ProblemImpl::RemoveResidualBlock(ResidualBlock* residual_block) {
+  CHECK(residual_block != nullptr);
+
+  // Verify that residual_block identifies a residual in the current problem.
+  const string residual_not_found_message =
+      StringPrintf("Residual block to remove: %p not found. This usually means "
+                   "one of three things have happened:\n"
+                   " 1) residual_block is uninitialised and points to a random "
+                   "area in memory.\n"
+                   " 2) residual_block represented a residual that was added to"
+                   " the problem, but referred to a parameter block which has "
+                   "since been removed, which removes all residuals which "
+                   "depend on that parameter block, and was thus removed.\n"
+                   " 3) residual_block referred to a residual that has already "
+                   "been removed from the problem (by the user).",
+                   residual_block);
+  if (options_.enable_fast_removal) {
+    CHECK(residual_block_set_.find(residual_block) !=
+          residual_block_set_.end())
+        << residual_not_found_message;
+  } else {
+    // Perform a full search over all current residuals.
+    CHECK(std::find(program_->residual_blocks().begin(),
+                    program_->residual_blocks().end(),
+                    residual_block) != program_->residual_blocks().end())
+        << residual_not_found_message;
+  }
+
+  InternalRemoveResidualBlock(residual_block);
+}
+
+void ProblemImpl::RemoveParameterBlock(double* values) {
+  ParameterBlock* parameter_block =
+      FindWithDefault(parameter_block_map_, values, NULL);
+  if (parameter_block == NULL) {
+    LOG(FATAL) << "Parameter block not found: " << values
+               << ". You must add the parameter block to the problem before "
+               << "it can be removed.";
+  }
+
+  if (options_.enable_fast_removal) {
+    // Copy the dependent residuals from the parameter block because the set of
+    // dependents will change after each call to RemoveResidualBlock().
+    vector<ResidualBlock*> residual_blocks_to_remove(
+        parameter_block->mutable_residual_blocks()->begin(),
+        parameter_block->mutable_residual_blocks()->end());
+    for (int i = 0; i < residual_blocks_to_remove.size(); ++i) {
+      InternalRemoveResidualBlock(residual_blocks_to_remove[i]);
+    }
+  } else {
+    // Scan all the residual blocks to remove ones that depend on the parameter
+    // block. Do the scan backwards since the vector changes while iterating.
+    const int num_residual_blocks = NumResidualBlocks();
+    for (int i = num_residual_blocks - 1; i >= 0; --i) {
+      ResidualBlock* residual_block =
+          (*(program_->mutable_residual_blocks()))[i];
+      const int num_parameter_blocks = residual_block->NumParameterBlocks();
+      for (int j = 0; j < num_parameter_blocks; ++j) {
+        if (residual_block->parameter_blocks()[j] == parameter_block) {
+          InternalRemoveResidualBlock(residual_block);
+          // The parameter blocks are guaranteed unique.
+          break;
+        }
+      }
+    }
+  }
+  DeleteBlockInVector(program_->mutable_parameter_blocks(), parameter_block);
+}
+
+void ProblemImpl::SetParameterBlockConstant(double* values) {
+  ParameterBlock* parameter_block =
+      FindWithDefault(parameter_block_map_, values, NULL);
+  if (parameter_block == NULL) {
+    LOG(FATAL) << "Parameter block not found: " << values
+               << ". You must add the parameter block to the problem before "
+               << "it can be set constant.";
+  }
+
+  parameter_block->SetConstant();
+}
+
+bool ProblemImpl::IsParameterBlockConstant(double* values) const {
+  const ParameterBlock* parameter_block =
+      FindWithDefault(parameter_block_map_, values, NULL);
+  CHECK(parameter_block != NULL)
+    << "Parameter block not found: " << values << ". You must add the "
+    << "parameter block to the problem before it can be queried.";
+
+  return parameter_block->IsSetConstantByUser();
+}
+
+void ProblemImpl::SetParameterBlockVariable(double* values) {
+  ParameterBlock* parameter_block =
+      FindWithDefault(parameter_block_map_, values, NULL);
+  if (parameter_block == NULL) {
+    LOG(FATAL) << "Parameter block not found: " << values
+               << ". You must add the parameter block to the problem before "
+               << "it can be set varying.";
+  }
+
+  parameter_block->SetVarying();
+}
+
+void ProblemImpl::SetParameterization(
+    double* values,
+    LocalParameterization* local_parameterization) {
+  ParameterBlock* parameter_block =
+      FindWithDefault(parameter_block_map_, values, NULL);
+  if (parameter_block == NULL) {
+    LOG(FATAL) << "Parameter block not found: " << values
+               << ". You must add the parameter block to the problem before "
+               << "you can set its local parameterization.";
+  }
+
+  parameter_block->SetParameterization(local_parameterization);
+}
+
+const LocalParameterization* ProblemImpl::GetParameterization(
+    double* values) const {
+  ParameterBlock* parameter_block =
+      FindWithDefault(parameter_block_map_, values, NULL);
+  if (parameter_block == NULL) {
+    LOG(FATAL) << "Parameter block not found: " << values
+               << ". You must add the parameter block to the problem before "
+               << "you can get its local parameterization.";
+  }
+
+  return parameter_block->local_parameterization();
+}
+
+void ProblemImpl::SetParameterLowerBound(double* values,
+                                         int index,
+                                         double lower_bound) {
+  ParameterBlock* parameter_block =
+      FindWithDefault(parameter_block_map_, values, NULL);
+  if (parameter_block == NULL) {
+    LOG(FATAL) << "Parameter block not found: " << values
+               << ". You must add the parameter block to the problem before "
+               << "you can set a lower bound on one of its components.";
+  }
+
+  parameter_block->SetLowerBound(index, lower_bound);
+}
+
+void ProblemImpl::SetParameterUpperBound(double* values,
+                                         int index,
+                                         double upper_bound) {
+  ParameterBlock* parameter_block =
+      FindWithDefault(parameter_block_map_, values, NULL);
+  if (parameter_block == NULL) {
+    LOG(FATAL) << "Parameter block not found: " << values
+               << ". You must add the parameter block to the problem before "
+               << "you can set an upper bound on one of its components.";
+  }
+  parameter_block->SetUpperBound(index, upper_bound);
+}
+
+double ProblemImpl::GetParameterLowerBound(double* values, int index) const {
+  ParameterBlock* parameter_block =
+      FindWithDefault(parameter_block_map_, values, NULL);
+  if (parameter_block == NULL) {
+    LOG(FATAL) << "Parameter block not found: " << values
+               << ". You must add the parameter block to the problem before "
+               << "you can get the lower bound on one of its components.";
+  }
+  return parameter_block->LowerBound(index);
+}
+
+double ProblemImpl::GetParameterUpperBound(double* values, int index) const {
+  ParameterBlock* parameter_block =
+      FindWithDefault(parameter_block_map_, values, NULL);
+  if (parameter_block == NULL) {
+    LOG(FATAL) << "Parameter block not found: " << values
+               << ". You must add the parameter block to the problem before "
+               << "you can set an upper bound on one of its components.";
+  }
+  return parameter_block->UpperBound(index);
+}
+
+bool ProblemImpl::Evaluate(const Problem::EvaluateOptions& evaluate_options,
+                           double* cost,
+                           vector<double>* residuals,
+                           vector<double>* gradient,
+                           CRSMatrix* jacobian) {
+  if (cost == NULL &&
+      residuals == NULL &&
+      gradient == NULL &&
+      jacobian == NULL) {
+    LOG(INFO) << "Nothing to do.";
+    return true;
+  }
+
+  // If the user supplied residual blocks, then use them, otherwise
+  // take the residual blocks from the underlying program.
+  Program program;
+  *program.mutable_residual_blocks() =
+      ((evaluate_options.residual_blocks.size() > 0)
+       ? evaluate_options.residual_blocks : program_->residual_blocks());
+
+  const vector<double*>& parameter_block_ptrs =
+      evaluate_options.parameter_blocks;
+
+  vector<ParameterBlock*> variable_parameter_blocks;
+  vector<ParameterBlock*>& parameter_blocks =
+      *program.mutable_parameter_blocks();
+
+  if (parameter_block_ptrs.size() == 0) {
+    // The user did not provide any parameter blocks, so default to
+    // using all the parameter blocks in the order that they are in
+    // the underlying program object.
+    parameter_blocks = program_->parameter_blocks();
+  } else {
+    // The user supplied a vector of parameter blocks. Using this list
+    // requires a number of steps.
+
+    // 1. Convert double* into ParameterBlock*
+    parameter_blocks.resize(parameter_block_ptrs.size());
+    for (int i = 0; i < parameter_block_ptrs.size(); ++i) {
+      parameter_blocks[i] = FindWithDefault(parameter_block_map_,
+                                            parameter_block_ptrs[i],
+                                            NULL);
+      if (parameter_blocks[i] == NULL) {
+        LOG(FATAL) << "No known parameter block for "
+                   << "Problem::Evaluate::Options.parameter_blocks[" << i << "]"
+                   << " = " << parameter_block_ptrs[i];
+      }
+    }
+
+    // 2. The user may have only supplied a subset of parameter
+    // blocks, so identify the ones that are not supplied by the user
+    // and are NOT constant. These parameter blocks are stored in
+    // variable_parameter_blocks.
+    //
+    // To ensure that the parameter blocks are not included in the
+    // columns of the jacobian, we need to make sure that they are
+    // constant during evaluation and then make them variable again
+    // after we are done.
+    vector<ParameterBlock*> all_parameter_blocks(program_->parameter_blocks());
+    vector<ParameterBlock*> included_parameter_blocks(
+        program.parameter_blocks());
+
+    vector<ParameterBlock*> excluded_parameter_blocks;
+    sort(all_parameter_blocks.begin(), all_parameter_blocks.end());
+    sort(included_parameter_blocks.begin(), included_parameter_blocks.end());
+    set_difference(all_parameter_blocks.begin(),
+                   all_parameter_blocks.end(),
+                   included_parameter_blocks.begin(),
+                   included_parameter_blocks.end(),
+                   back_inserter(excluded_parameter_blocks));
+
+    variable_parameter_blocks.reserve(excluded_parameter_blocks.size());
+    for (int i = 0; i < excluded_parameter_blocks.size(); ++i) {
+      ParameterBlock* parameter_block = excluded_parameter_blocks[i];
+      if (!parameter_block->IsConstant()) {
+        variable_parameter_blocks.push_back(parameter_block);
+        parameter_block->SetConstant();
+      }
+    }
+  }
+
+  // Setup the Parameter indices and offsets before an evaluator can
+  // be constructed and used.
+  program.SetParameterOffsetsAndIndex();
+
+  Evaluator::Options evaluator_options;
+
+  // Even though using SPARSE_NORMAL_CHOLESKY requires SuiteSparse or
+  // CXSparse, here it is just being used to tell the evaluator to
+  // use a CompressedRowSparseMatrix for the Jacobian. This is because
+  // the Evaluator decides the storage for the Jacobian based on the
+  // type of linear solver being used.
+  evaluator_options.linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+#ifdef CERES_NO_THREADS
+  LOG_IF(WARNING, evaluate_options.num_threads > 1)
+      << "No threading support is compiled into this binary; "
+      << "only evaluate_options.num_threads = 1 is supported. Switching "
+      << "to single threaded mode.";
+  evaluator_options.num_threads = 1;
+#else
+  evaluator_options.num_threads = evaluate_options.num_threads;
+#endif  // CERES_NO_THREADS
+
+  // The main thread also does work so we only need to launch num_threads - 1.
+  context_impl_->EnsureMinimumThreads(evaluator_options.num_threads - 1);
+  evaluator_options.context = context_impl_;
+
+  std::unique_ptr<Evaluator> evaluator(
+      new ProgramEvaluator<ScratchEvaluatePreparer,
+                           CompressedRowJacobianWriter>(evaluator_options,
+                                                        &program));
+
+  if (residuals != NULL) {
+    residuals->resize(evaluator->NumResiduals());
+  }
+
+  if (gradient != NULL) {
+    gradient->resize(evaluator->NumEffectiveParameters());
+  }
+
+  std::unique_ptr<CompressedRowSparseMatrix> tmp_jacobian;
+  if (jacobian != NULL) {
+    tmp_jacobian.reset(
+        down_cast<CompressedRowSparseMatrix*>(evaluator->CreateJacobian()));
+  }
+
+  // Point the state pointers to the user state pointers. This is
+  // needed so that we can extract a parameter vector which is then
+  // passed to Evaluator::Evaluate.
+  program.SetParameterBlockStatePtrsToUserStatePtrs();
+
+  // Copy the value of the parameter blocks into a vector, since the
+  // Evaluator::Evaluate method needs its input as such. The previous
+  // call to SetParameterBlockStatePtrsToUserStatePtrs ensures that
+  // these values are the ones corresponding to the actual state of
+  // the parameter blocks, rather than the temporary state pointer
+  // used for evaluation.
+  Vector parameters(program.NumParameters());
+  program.ParameterBlocksToStateVector(parameters.data());
+
+  double tmp_cost = 0;
+
+  Evaluator::EvaluateOptions evaluator_evaluate_options;
+  evaluator_evaluate_options.apply_loss_function =
+      evaluate_options.apply_loss_function;
+  bool status = evaluator->Evaluate(evaluator_evaluate_options,
+                                    parameters.data(),
+                                    &tmp_cost,
+                                    residuals != NULL ? &(*residuals)[0] : NULL,
+                                    gradient != NULL ? &(*gradient)[0] : NULL,
+                                    tmp_jacobian.get());
+
+  // Make the parameter blocks that were temporarily marked constant,
+  // variable again.
+  for (int i = 0; i < variable_parameter_blocks.size(); ++i) {
+    variable_parameter_blocks[i]->SetVarying();
+  }
+
+  if (status) {
+    if (cost != NULL) {
+      *cost = tmp_cost;
+    }
+    if (jacobian != NULL) {
+      tmp_jacobian->ToCRSMatrix(jacobian);
+    }
+  }
+
+  program_->SetParameterBlockStatePtrsToUserStatePtrs();
+  program_->SetParameterOffsetsAndIndex();
+  return status;
+}
+
+int ProblemImpl::NumParameterBlocks() const {
+  return program_->NumParameterBlocks();
+}
+
+int ProblemImpl::NumParameters() const {
+  return program_->NumParameters();
+}
+
+int ProblemImpl::NumResidualBlocks() const {
+  return program_->NumResidualBlocks();
+}
+
+int ProblemImpl::NumResiduals() const {
+  return program_->NumResiduals();
+}
+
+int ProblemImpl::ParameterBlockSize(const double* values) const {
+  ParameterBlock* parameter_block =
+      FindWithDefault(parameter_block_map_, const_cast<double*>(values), NULL);
+  if (parameter_block == NULL) {
+    LOG(FATAL) << "Parameter block not found: " << values
+               << ". You must add the parameter block to the problem before "
+               << "you can get its size.";
+  }
+
+  return parameter_block->Size();
+}
+
+int ProblemImpl::ParameterBlockLocalSize(const double* values) const {
+  ParameterBlock* parameter_block =
+      FindWithDefault(parameter_block_map_, const_cast<double*>(values), NULL);
+  if (parameter_block == NULL) {
+    LOG(FATAL) << "Parameter block not found: " << values
+               << ". You must add the parameter block to the problem before "
+               << "you can get its local size.";
+  }
+
+  return parameter_block->LocalSize();
+}
+
+bool ProblemImpl::HasParameterBlock(const double* parameter_block) const {
+  return (parameter_block_map_.find(const_cast<double*>(parameter_block)) !=
+          parameter_block_map_.end());
+}
+
+void ProblemImpl::GetParameterBlocks(vector<double*>* parameter_blocks) const {
+  CHECK(parameter_blocks != nullptr);
+  parameter_blocks->resize(0);
+  parameter_blocks->reserve(parameter_block_map_.size());
+  for (const auto& entry : parameter_block_map_) {
+    parameter_blocks->push_back(entry.first);
+  }
+}
+
+void ProblemImpl::GetResidualBlocks(
+    vector<ResidualBlockId>* residual_blocks) const {
+  CHECK(residual_blocks != nullptr);
+  *residual_blocks = program().residual_blocks();
+}
+
+void ProblemImpl::GetParameterBlocksForResidualBlock(
+    const ResidualBlockId residual_block,
+    vector<double*>* parameter_blocks) const {
+  int num_parameter_blocks = residual_block->NumParameterBlocks();
+  CHECK(parameter_blocks != nullptr);
+  parameter_blocks->resize(num_parameter_blocks);
+  for (int i = 0; i < num_parameter_blocks; ++i) {
+    (*parameter_blocks)[i] =
+        residual_block->parameter_blocks()[i]->mutable_user_state();
+  }
+}
+
+const CostFunction* ProblemImpl::GetCostFunctionForResidualBlock(
+    const ResidualBlockId residual_block) const {
+  return residual_block->cost_function();
+}
+
+const LossFunction* ProblemImpl::GetLossFunctionForResidualBlock(
+    const ResidualBlockId residual_block) const {
+  return residual_block->loss_function();
+}
+
+void ProblemImpl::GetResidualBlocksForParameterBlock(
+    const double* values,
+    vector<ResidualBlockId>* residual_blocks) const {
+  ParameterBlock* parameter_block =
+      FindWithDefault(parameter_block_map_, const_cast<double*>(values), NULL);
+  if (parameter_block == NULL) {
+    LOG(FATAL) << "Parameter block not found: " << values
+               << ". You must add the parameter block to the problem before "
+               << "you can get the residual blocks that depend on it.";
+  }
+
+  if (options_.enable_fast_removal) {
+    // In this case the residual blocks that depend on the parameter block are
+    // stored in the parameter block already, so just copy them out.
+    CHECK(residual_blocks != nullptr);
+    residual_blocks->resize(parameter_block->mutable_residual_blocks()->size());
+    std::copy(parameter_block->mutable_residual_blocks()->begin(),
+              parameter_block->mutable_residual_blocks()->end(),
+              residual_blocks->begin());
+    return;
+  }
+
+  // Find residual blocks that depend on the parameter block.
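+  // Without enable_fast_removal this falls back to a linear scan over every
+  // residual block in the program.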
+  CHECK(residual_blocks != nullptr);
+  residual_blocks->clear();
+  const int num_residual_blocks = NumResidualBlocks();
+  for (int i = 0; i < num_residual_blocks; ++i) {
+    ResidualBlock* residual_block =
+        (*(program_->mutable_residual_blocks()))[i];
+    const int num_parameter_blocks = residual_block->NumParameterBlocks();
+    for (int j = 0; j < num_parameter_blocks; ++j) {
+      if (residual_block->parameter_blocks()[j] == parameter_block) {
+        residual_blocks->push_back(residual_block);
+        // The parameter blocks are guaranteed unique.
+        break;
+      }
+    }
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/problem_impl.h b/internal/ceres/problem_impl.h
new file mode 100644
index 0000000..eabeaed
--- /dev/null
+++ b/internal/ceres/problem_impl.h
@@ -0,0 +1,209 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// This is the implementation of the public Problem API. The pointer to
+// implementation (PIMPL) idiom makes it possible for Ceres internal code to
+// refer to the private data members without needing to expose them to the
+// world. An alternative to PIMPL is to have a factory which returns instances
+// of a virtual base class; while that approach would work, it requires clients
+// to always put a Problem object into a scoped pointer, which needlessly
+// muddies client code for little benefit. Therefore, the PIMPL compromise was
+// chosen.
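+//
+// As a rough sketch of the wiring this implies (the exact member name lives in
+// problem.h and may differ), the public class simply owns an impl pointer and
+// forwards to it:
+//
+//   class Problem {
+//    public:
+//     ResidualBlockId AddResidualBlock(CostFunction* cost_function,
+//                                      LossFunction* loss_function,
+//                                      double* x0);
+//     // ... other thin forwarding methods ...
+//    private:
+//     std::unique_ptr<internal::ProblemImpl> impl_;
+//   };
+//
+// which is what lets clients create a Problem directly on the stack.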
+
+#ifndef CERES_PUBLIC_PROBLEM_IMPL_H_
+#define CERES_PUBLIC_PROBLEM_IMPL_H_
+
+#include <array>
+#include <map>
+#include <memory>
+#include <unordered_set>
+#include <vector>
+
+#include "ceres/context_impl.h"
+#include "ceres/internal/port.h"
+#include "ceres/problem.h"
+#include "ceres/types.h"
+
+namespace ceres {
+
+class CostFunction;
+class LossFunction;
+class LocalParameterization;
+struct CRSMatrix;
+
+namespace internal {
+
+class Program;
+class ResidualBlock;
+
+class ProblemImpl {
+ public:
+  typedef std::map<double*, ParameterBlock*> ParameterMap;
+  typedef std::unordered_set<ResidualBlock*> ResidualBlockSet;
+  typedef std::map<CostFunction*, int> CostFunctionRefCount;
+  typedef std::map<LossFunction*, int> LossFunctionRefCount;
+
+  ProblemImpl();
+  explicit ProblemImpl(const Problem::Options& options);
+  ProblemImpl(const ProblemImpl&) = delete;
+  void operator=(const ProblemImpl&) = delete;
+
+  ~ProblemImpl();
+
+  // See the public problem.h file for description of these methods.
+  ResidualBlockId AddResidualBlock(
+      CostFunction* cost_function,
+      LossFunction* loss_function,
+      double* const* const parameter_blocks,
+      int num_parameter_blocks);
+
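+  // Variadic convenience overload: packs the individual parameter block
+  // pointers into a std::array and forwards to the pointer-plus-count
+  // overload above.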
+  template <typename... Ts>
+  ResidualBlockId AddResidualBlock(CostFunction* cost_function,
+                                   LossFunction* loss_function,
+                                   double* x0,
+                                   Ts*... xs) {
+    const std::array<double*, sizeof...(Ts) + 1> parameter_blocks{{x0, xs...}};
+    return AddResidualBlock(cost_function,
+                            loss_function,
+                            parameter_blocks.data(),
+                            static_cast<int>(parameter_blocks.size()));
+  }
+
+  void AddParameterBlock(double* values, int size);
+  void AddParameterBlock(double* values,
+                         int size,
+                         LocalParameterization* local_parameterization);
+
+  void RemoveResidualBlock(ResidualBlock* residual_block);
+  void RemoveParameterBlock(double* values);
+
+  void SetParameterBlockConstant(double* values);
+  void SetParameterBlockVariable(double* values);
+  bool IsParameterBlockConstant(double* values) const;
+
+  void SetParameterization(double* values,
+                           LocalParameterization* local_parameterization);
+  const LocalParameterization* GetParameterization(double* values) const;
+
+  void SetParameterLowerBound(double* values, int index, double lower_bound);
+  void SetParameterUpperBound(double* values, int index, double upper_bound);
+  double GetParameterLowerBound(double* values, int index) const;
+  double GetParameterUpperBound(double* values, int index) const;
+
+  bool Evaluate(const Problem::EvaluateOptions& options,
+                double* cost,
+                std::vector<double>* residuals,
+                std::vector<double>* gradient,
+                CRSMatrix* jacobian);
+
+  int NumParameterBlocks() const;
+  int NumParameters() const;
+  int NumResidualBlocks() const;
+  int NumResiduals() const;
+
+  int ParameterBlockSize(const double* parameter_block) const;
+  int ParameterBlockLocalSize(const double* parameter_block) const;
+
+  bool HasParameterBlock(const double* parameter_block) const;
+
+  void GetParameterBlocks(std::vector<double*>* parameter_blocks) const;
+  void GetResidualBlocks(std::vector<ResidualBlockId>* residual_blocks) const;
+
+  void GetParameterBlocksForResidualBlock(
+      const ResidualBlockId residual_block,
+      std::vector<double*>* parameter_blocks) const;
+
+  const CostFunction* GetCostFunctionForResidualBlock(
+      const ResidualBlockId residual_block) const;
+  const LossFunction* GetLossFunctionForResidualBlock(
+      const ResidualBlockId residual_block) const;
+
+  void GetResidualBlocksForParameterBlock(
+      const double* values,
+      std::vector<ResidualBlockId>* residual_blocks) const;
+
+  const Program& program() const { return *program_; }
+  Program* mutable_program() { return program_.get(); }
+
+  const ParameterMap& parameter_map() const { return parameter_block_map_; }
+  const ResidualBlockSet& residual_block_set() const {
+    CHECK(options_.enable_fast_removal)
+        << "Fast removal not enabled, residual_block_set is not maintained.";
+    return residual_block_set_;
+  }
+
+  ContextImpl* context() { return context_impl_; }
+
+ private:
+  ParameterBlock* InternalAddParameterBlock(double* values, int size);
+  void InternalRemoveResidualBlock(ResidualBlock* residual_block);
+
+  // Delete the arguments in question. These differ from the Remove* functions
+  // in that they do not clean up references to the block to delete; they
+  // merely delete them.
+  template <typename Block>
+  void DeleteBlockInVector(std::vector<Block*>* mutable_blocks,
+                           Block* block_to_remove);
+  void DeleteBlock(ResidualBlock* residual_block);
+  void DeleteBlock(ParameterBlock* parameter_block);
+
+  const Problem::Options options_;
+
+  bool context_impl_owned_;
+  ContextImpl* context_impl_;
+
+  // The mapping from user pointers to parameter blocks.
+  ParameterMap parameter_block_map_;
+
+  // Iff enable_fast_removal is enabled, contains the current residual blocks.
+  ResidualBlockSet residual_block_set_;
+
+  // The actual parameter and residual blocks.
+  std::unique_ptr<internal::Program> program_;
+
+  // When removing parameter blocks, parameterizations have ambiguous
+  // ownership. Instead of scanning the entire problem to see if the
+  // parameterization is shared with other parameter blocks, buffer
+  // them until destruction.
+  //
+  // TODO(keir): See if it makes sense to use sets instead.
+  std::vector<LocalParameterization*> local_parameterizations_to_delete_;
+
+  // For each cost function and loss function in the problem, a count
+  // of the number of residual blocks that refer to them. When the
+  // count goes to zero and the problem owns these objects, they are
+  // destroyed.
+  CostFunctionRefCount cost_function_ref_count_;
+  LossFunctionRefCount loss_function_ref_count_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_PUBLIC_PROBLEM_IMPL_H_
diff --git a/internal/ceres/problem_test.cc b/internal/ceres/problem_test.cc
new file mode 100644
index 0000000..3f9f804
--- /dev/null
+++ b/internal/ceres/problem_test.cc
@@ -0,0 +1,1586 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//         keir@google.com (Keir Mierle)
+
+#include "ceres/problem.h"
+#include "ceres/problem_impl.h"
+
+#include <memory>
+#include "ceres/casts.h"
+#include "ceres/cost_function.h"
+#include "ceres/crs_matrix.h"
+#include "ceres/evaluator_test_utils.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/local_parameterization.h"
+#include "ceres/loss_function.h"
+#include "ceres/map_util.h"
+#include "ceres/parameter_block.h"
+#include "ceres/program.h"
+#include "ceres/sized_cost_function.h"
+#include "ceres/sparse_matrix.h"
+#include "ceres/types.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+using std::vector;
+
+// The following three classes are for the purposes of defining
+// function signatures. They have dummy Evaluate functions.
+
+// Trivial cost function that accepts a single argument.
+class UnaryCostFunction : public CostFunction {
+ public:
+  UnaryCostFunction(int num_residuals, int32_t parameter_block_size) {
+    set_num_residuals(num_residuals);
+    mutable_parameter_block_sizes()->push_back(parameter_block_size);
+  }
+  virtual ~UnaryCostFunction() {}
+
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** jacobians) const {
+    for (int i = 0; i < num_residuals(); ++i) {
+      residuals[i] = 1;
+    }
+    return true;
+  }
+};
+
+// Trivial cost function that accepts two arguments.
+class BinaryCostFunction: public CostFunction {
+ public:
+  BinaryCostFunction(int num_residuals,
+                     int32_t parameter_block1_size,
+                     int32_t parameter_block2_size) {
+    set_num_residuals(num_residuals);
+    mutable_parameter_block_sizes()->push_back(parameter_block1_size);
+    mutable_parameter_block_sizes()->push_back(parameter_block2_size);
+  }
+
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** jacobians) const {
+    for (int i = 0; i < num_residuals(); ++i) {
+      residuals[i] = 2;
+    }
+    return true;
+  }
+};
+
+// Trivial cost function that accepts three arguments.
+class TernaryCostFunction: public CostFunction {
+ public:
+  TernaryCostFunction(int num_residuals,
+                      int32_t parameter_block1_size,
+                      int32_t parameter_block2_size,
+                      int32_t parameter_block3_size) {
+    set_num_residuals(num_residuals);
+    mutable_parameter_block_sizes()->push_back(parameter_block1_size);
+    mutable_parameter_block_sizes()->push_back(parameter_block2_size);
+    mutable_parameter_block_sizes()->push_back(parameter_block3_size);
+  }
+
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** jacobians) const {
+    for (int i = 0; i < num_residuals(); ++i) {
+      residuals[i] = 3;
+    }
+    return true;
+  }
+};
+
+TEST(Problem, AddResidualWithNullCostFunctionDies) {
+  double x[3], y[4], z[5];
+
+  Problem problem;
+  problem.AddParameterBlock(x, 3);
+  problem.AddParameterBlock(y, 4);
+  problem.AddParameterBlock(z, 5);
+
+  EXPECT_DEATH_IF_SUPPORTED(problem.AddResidualBlock(NULL, NULL, x),
+                            "cost_function != nullptr");
+}
+
+TEST(Problem, AddResidualWithIncorrectNumberOfParameterBlocksDies) {
+  double x[3], y[4], z[5];
+
+  Problem problem;
+  problem.AddParameterBlock(x, 3);
+  problem.AddParameterBlock(y, 4);
+  problem.AddParameterBlock(z, 5);
+
+  // UnaryCostFunction takes only one parameter, but two are passed.
+  EXPECT_DEATH_IF_SUPPORTED(
+      problem.AddResidualBlock(new UnaryCostFunction(2, 3), NULL, x, y),
+      "num_parameter_blocks");
+}
+
+TEST(Problem, AddResidualWithDifferentSizesOnTheSameVariableDies) {
+  double x[3];
+
+  Problem problem;
+  problem.AddResidualBlock(new UnaryCostFunction(2, 3), NULL, x);
+  EXPECT_DEATH_IF_SUPPORTED(problem.AddResidualBlock(
+                                new UnaryCostFunction(
+                                    2, 4 /* 4 != 3 */), NULL, x),
+                            "different block sizes");
+}
+
+TEST(Problem, AddResidualWithDuplicateParametersDies) {
+  double x[3], z[5];
+
+  Problem problem;
+  EXPECT_DEATH_IF_SUPPORTED(problem.AddResidualBlock(
+                                new BinaryCostFunction(2, 3, 3), NULL, x, x),
+                            "Duplicate parameter blocks");
+  EXPECT_DEATH_IF_SUPPORTED(problem.AddResidualBlock(
+                                new TernaryCostFunction(1, 5, 3, 5),
+                                NULL, z, x, z),
+                            "Duplicate parameter blocks");
+}
+
+TEST(Problem, AddResidualWithIncorrectSizesOfParameterBlockDies) {
+  double x[3], y[4], z[5];
+
+  Problem problem;
+  problem.AddParameterBlock(x, 3);
+  problem.AddParameterBlock(y, 4);
+  problem.AddParameterBlock(z, 5);
+
+  // The cost function expects the size of the second parameter, z, to be 4
+  // instead of 5 as declared above. This is fatal.
+  EXPECT_DEATH_IF_SUPPORTED(problem.AddResidualBlock(
+      new BinaryCostFunction(2, 3, 4), NULL, x, z),
+               "different block sizes");
+}
+
+TEST(Problem, AddResidualAddsDuplicatedParametersOnlyOnce) {
+  double x[3], y[4], z[5];
+
+  Problem problem;
+  problem.AddResidualBlock(new UnaryCostFunction(2, 3), NULL, x);
+  problem.AddResidualBlock(new UnaryCostFunction(2, 3), NULL, x);
+  problem.AddResidualBlock(new UnaryCostFunction(2, 4), NULL, y);
+  problem.AddResidualBlock(new UnaryCostFunction(2, 5), NULL, z);
+
+  EXPECT_EQ(3, problem.NumParameterBlocks());
+  EXPECT_EQ(12, problem.NumParameters());
+}
+
+TEST(Problem, AddParameterWithDifferentSizesOnTheSameVariableDies) {
+  double x[3], y[4];
+
+  Problem problem;
+  problem.AddParameterBlock(x, 3);
+  problem.AddParameterBlock(y, 4);
+
+  EXPECT_DEATH_IF_SUPPORTED(problem.AddParameterBlock(x, 4),
+                            "different block sizes");
+}
+
+static double *IntToPtr(int i) {
+  return reinterpret_cast<double*>(sizeof(double) * i);  // NOLINT
+}
+
+TEST(Problem, AddParameterWithAliasedParametersDies) {
+  // Layout is
+  //
+  //   0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17
+  //                 [x] x  x  x  x          [y] y  y
+  //         o==o==o                 o==o==o           o==o
+  //               o--o--o     o--o--o     o--o  o--o--o
+  //
+  // Parameter block additions are tested as listed above; expected successful
+  // ones marked with o==o and aliasing ones marked with o--o.
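+  //
+  // Note that IntToPtr(i) yields the address i * sizeof(double), so the
+  // integers above are offsets in units of doubles, not bytes.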
+
+  Problem problem;
+  problem.AddParameterBlock(IntToPtr(5),  5);  // x
+  problem.AddParameterBlock(IntToPtr(13), 3);  // y
+
+  EXPECT_DEATH_IF_SUPPORTED(problem.AddParameterBlock(IntToPtr( 4), 2),
+                            "Aliasing detected");
+  EXPECT_DEATH_IF_SUPPORTED(problem.AddParameterBlock(IntToPtr( 4), 3),
+                            "Aliasing detected");
+  EXPECT_DEATH_IF_SUPPORTED(problem.AddParameterBlock(IntToPtr( 4), 9),
+                            "Aliasing detected");
+  EXPECT_DEATH_IF_SUPPORTED(problem.AddParameterBlock(IntToPtr( 8), 3),
+                            "Aliasing detected");
+  EXPECT_DEATH_IF_SUPPORTED(problem.AddParameterBlock(IntToPtr(12), 2),
+                            "Aliasing detected");
+  EXPECT_DEATH_IF_SUPPORTED(problem.AddParameterBlock(IntToPtr(14), 3),
+                            "Aliasing detected");
+
+  // These ones should work.
+  problem.AddParameterBlock(IntToPtr( 2), 3);
+  problem.AddParameterBlock(IntToPtr(10), 3);
+  problem.AddParameterBlock(IntToPtr(16), 2);
+
+  ASSERT_EQ(5, problem.NumParameterBlocks());
+}
+
+TEST(Problem, AddParameterIgnoresDuplicateCalls) {
+  double x[3], y[4];
+
+  Problem problem;
+  problem.AddParameterBlock(x, 3);
+  problem.AddParameterBlock(y, 4);
+
+  // Creating parameter blocks multiple times is ignored.
+  problem.AddParameterBlock(x, 3);
+  problem.AddResidualBlock(new UnaryCostFunction(2, 3), NULL, x);
+
+  // ... even repeatedly.
+  problem.AddParameterBlock(x, 3);
+  problem.AddResidualBlock(new UnaryCostFunction(2, 3), NULL, x);
+
+  // More parameters are fine.
+  problem.AddParameterBlock(y, 4);
+  problem.AddResidualBlock(new UnaryCostFunction(2, 4), NULL, y);
+
+  EXPECT_EQ(2, problem.NumParameterBlocks());
+  EXPECT_EQ(7, problem.NumParameters());
+}
+
+TEST(Problem, AddingParametersAndResidualsResultsInExpectedProblem) {
+  double x[3], y[4], z[5], w[4];
+
+  Problem problem;
+  problem.AddParameterBlock(x, 3);
+  EXPECT_EQ(1, problem.NumParameterBlocks());
+  EXPECT_EQ(3, problem.NumParameters());
+
+  problem.AddParameterBlock(y, 4);
+  EXPECT_EQ(2, problem.NumParameterBlocks());
+  EXPECT_EQ(7, problem.NumParameters());
+
+  problem.AddParameterBlock(z, 5);
+  EXPECT_EQ(3,  problem.NumParameterBlocks());
+  EXPECT_EQ(12, problem.NumParameters());
+
+  // Add a parameter that has a local parameterization.
+  w[0] = 1.0; w[1] = 0.0; w[2] = 0.0; w[3] = 0.0;
+  problem.AddParameterBlock(w, 4, new QuaternionParameterization);
+  EXPECT_EQ(4,  problem.NumParameterBlocks());
+  EXPECT_EQ(16, problem.NumParameters());
+
+  problem.AddResidualBlock(new UnaryCostFunction(2, 3), NULL, x);
+  problem.AddResidualBlock(new BinaryCostFunction(6, 5, 4), NULL, z, y);
+  problem.AddResidualBlock(new BinaryCostFunction(3, 3, 5), NULL, x, z);
+  problem.AddResidualBlock(new BinaryCostFunction(7, 5, 3), NULL, z, x);
+  problem.AddResidualBlock(new TernaryCostFunction(1, 5, 3, 4), NULL, z, x, y);
+
+  const int total_residuals = 2 + 6 + 3 + 7 + 1;
+  EXPECT_EQ(problem.NumResidualBlocks(), 5);
+  EXPECT_EQ(problem.NumResiduals(), total_residuals);
+}
+
+class DestructorCountingCostFunction : public SizedCostFunction<3, 4, 5> {
+ public:
+  explicit DestructorCountingCostFunction(int *num_destructions)
+      : num_destructions_(num_destructions) {}
+
+  virtual ~DestructorCountingCostFunction() {
+    *num_destructions_ += 1;
+  }
+
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** jacobians) const {
+    return true;
+  }
+
+ private:
+  int* num_destructions_;
+};
+
+TEST(Problem, ReusedCostFunctionsAreOnlyDeletedOnce) {
+  double y[4], z[5];
+  int num_destructions = 0;
+
+  // Add a cost function multiple times and check to make sure that
+  // the destructor on the cost function is only called once.
+  {
+    Problem problem;
+    problem.AddParameterBlock(y, 4);
+    problem.AddParameterBlock(z, 5);
+
+    CostFunction* cost = new DestructorCountingCostFunction(&num_destructions);
+    problem.AddResidualBlock(cost, NULL, y, z);
+    problem.AddResidualBlock(cost, NULL, y, z);
+    problem.AddResidualBlock(cost, NULL, y, z);
+    EXPECT_EQ(3, problem.NumResidualBlocks());
+  }
+
+  // Check that the destructor was called only once.
+  CHECK_EQ(num_destructions, 1);
+}
+
+TEST(Problem, GetCostFunctionForResidualBlock) {
+  double x[3];
+  Problem problem;
+  CostFunction* cost_function = new UnaryCostFunction(2, 3);
+  const ResidualBlockId residual_block =
+      problem.AddResidualBlock(cost_function, NULL, x);
+  EXPECT_EQ(problem.GetCostFunctionForResidualBlock(residual_block),
+            cost_function);
+  EXPECT_TRUE(problem.GetLossFunctionForResidualBlock(residual_block) == NULL);
+}
+
+TEST(Problem, GetLossFunctionForResidualBlock) {
+  double x[3];
+  Problem problem;
+  CostFunction* cost_function = new UnaryCostFunction(2, 3);
+  LossFunction* loss_function = new TrivialLoss();
+  const ResidualBlockId residual_block =
+      problem.AddResidualBlock(cost_function, loss_function, x);
+  EXPECT_EQ(problem.GetCostFunctionForResidualBlock(residual_block),
+            cost_function);
+  EXPECT_EQ(problem.GetLossFunctionForResidualBlock(residual_block),
+            loss_function);
+}
+
+TEST(Problem, CostFunctionsAreDeletedEvenWithRemovals) {
+  double y[4], z[5], w[4];
+  int num_destructions = 0;
+  {
+    Problem problem;
+    problem.AddParameterBlock(y, 4);
+    problem.AddParameterBlock(z, 5);
+
+    CostFunction* cost_yz =
+        new DestructorCountingCostFunction(&num_destructions);
+    CostFunction* cost_wz =
+        new DestructorCountingCostFunction(&num_destructions);
+    ResidualBlock* r_yz = problem.AddResidualBlock(cost_yz, NULL, y, z);
+    ResidualBlock* r_wz = problem.AddResidualBlock(cost_wz, NULL, w, z);
+    EXPECT_EQ(2, problem.NumResidualBlocks());
+
+    problem.RemoveResidualBlock(r_yz);
+    CHECK_EQ(num_destructions, 1);
+    problem.RemoveResidualBlock(r_wz);
+    CHECK_EQ(num_destructions, 2);
+
+    EXPECT_EQ(0, problem.NumResidualBlocks());
+  }
+  CHECK_EQ(num_destructions, 2);
+}
+
+// Make the dynamic problem tests (e.g. for removing residual blocks)
+// parameterized on whether the fast-removal mode (enable_fast_removal) is
+// enabled or not.
+//
+// This tests against ProblemImpl instead of Problem in order to inspect the
+// state of the resulting Program; this is difficult with only the thin Problem
+// interface.
+struct DynamicProblem : public ::testing::TestWithParam<bool> {
+  DynamicProblem() {
+    Problem::Options options;
+    options.enable_fast_removal = GetParam();
+    problem.reset(new ProblemImpl(options));
+  }
+
+  ParameterBlock* GetParameterBlock(int block) {
+    return problem->program().parameter_blocks()[block];
+  }
+  ResidualBlock* GetResidualBlock(int block) {
+    return problem->program().residual_blocks()[block];
+  }
+
+  bool HasResidualBlock(ResidualBlock* residual_block) {
+    bool have_residual_block = true;
+    if (GetParam()) {
+      have_residual_block &=
+          (problem->residual_block_set().find(residual_block) !=
+           problem->residual_block_set().end());
+    }
+    have_residual_block &=
+        find(problem->program().residual_blocks().begin(),
+             problem->program().residual_blocks().end(),
+             residual_block) != problem->program().residual_blocks().end();
+    return have_residual_block;
+  }
+
+  int NumResidualBlocks() {
+    // Verify that the hash set of residuals is maintained consistently.
+    if (GetParam()) {
+      EXPECT_EQ(problem->residual_block_set().size(),
+                problem->NumResidualBlocks());
+    }
+    return problem->NumResidualBlocks();
+  }
+
+  // The next block of functions until the end are only for testing the
+  // residual block removals.
+  void ExpectParameterBlockContainsResidualBlock(
+      double* values,
+      ResidualBlock* residual_block) {
+    ParameterBlock* parameter_block =
+        FindOrDie(problem->parameter_map(), values);
+    EXPECT_TRUE(ContainsKey(*(parameter_block->mutable_residual_blocks()),
+                            residual_block));
+  }
+
+  void ExpectSize(double* values, int size) {
+    ParameterBlock* parameter_block =
+        FindOrDie(problem->parameter_map(), values);
+    EXPECT_EQ(size, parameter_block->mutable_residual_blocks()->size());
+  }
+
+  // Degenerate case.
+  void ExpectParameterBlockContains(double* values) {
+    ExpectSize(values, 0);
+  }
+
+  void ExpectParameterBlockContains(double* values,
+                                    ResidualBlock* r1) {
+    ExpectSize(values, 1);
+    ExpectParameterBlockContainsResidualBlock(values, r1);
+  }
+
+  void ExpectParameterBlockContains(double* values,
+                                    ResidualBlock* r1,
+                                    ResidualBlock* r2) {
+    ExpectSize(values, 2);
+    ExpectParameterBlockContainsResidualBlock(values, r1);
+    ExpectParameterBlockContainsResidualBlock(values, r2);
+  }
+
+  void ExpectParameterBlockContains(double* values,
+                                    ResidualBlock* r1,
+                                    ResidualBlock* r2,
+                                    ResidualBlock* r3) {
+    ExpectSize(values, 3);
+    ExpectParameterBlockContainsResidualBlock(values, r1);
+    ExpectParameterBlockContainsResidualBlock(values, r2);
+    ExpectParameterBlockContainsResidualBlock(values, r3);
+  }
+
+  void ExpectParameterBlockContains(double* values,
+                                    ResidualBlock* r1,
+                                    ResidualBlock* r2,
+                                    ResidualBlock* r3,
+                                    ResidualBlock* r4) {
+    ExpectSize(values, 4);
+    ExpectParameterBlockContainsResidualBlock(values, r1);
+    ExpectParameterBlockContainsResidualBlock(values, r2);
+    ExpectParameterBlockContainsResidualBlock(values, r3);
+    ExpectParameterBlockContainsResidualBlock(values, r4);
+  }
+
+  std::unique_ptr<ProblemImpl> problem;
+  double y[4], z[5], w[3];
+};
+
+TEST(Problem, SetParameterBlockConstantWithUnknownPtrDies) {
+  double x[3];
+  double y[2];
+
+  Problem problem;
+  problem.AddParameterBlock(x, 3);
+
+  EXPECT_DEATH_IF_SUPPORTED(problem.SetParameterBlockConstant(y),
+                            "Parameter block not found:");
+}
+
+TEST(Problem, SetParameterBlockVariableWithUnknownPtrDies) {
+  double x[3];
+  double y[2];
+
+  Problem problem;
+  problem.AddParameterBlock(x, 3);
+
+  EXPECT_DEATH_IF_SUPPORTED(problem.SetParameterBlockVariable(y),
+                            "Parameter block not found:");
+}
+
+TEST(Problem, IsParameterBlockConstant) {
+  double x1[3];
+  double x2[3];
+
+  Problem problem;
+  problem.AddParameterBlock(x1, 3);
+  problem.AddParameterBlock(x2, 3);
+
+  EXPECT_FALSE(problem.IsParameterBlockConstant(x1));
+  EXPECT_FALSE(problem.IsParameterBlockConstant(x2));
+
+  problem.SetParameterBlockConstant(x1);
+  EXPECT_TRUE(problem.IsParameterBlockConstant(x1));
+  EXPECT_FALSE(problem.IsParameterBlockConstant(x2));
+
+  problem.SetParameterBlockConstant(x2);
+  EXPECT_TRUE(problem.IsParameterBlockConstant(x1));
+  EXPECT_TRUE(problem.IsParameterBlockConstant(x2));
+
+  problem.SetParameterBlockVariable(x1);
+  EXPECT_FALSE(problem.IsParameterBlockConstant(x1));
+  EXPECT_TRUE(problem.IsParameterBlockConstant(x2));
+}
+
+TEST(Problem, IsParameterBlockConstantWithUnknownPtrDies) {
+  double x[3];
+  double y[2];
+
+  Problem problem;
+  problem.AddParameterBlock(x, 3);
+
+  EXPECT_DEATH_IF_SUPPORTED(problem.IsParameterBlockConstant(y),
+                            "Parameter block not found:");
+}
+
+TEST(Problem, SetLocalParameterizationWithUnknownPtrDies) {
+  double x[3];
+  double y[2];
+
+  Problem problem;
+  problem.AddParameterBlock(x, 3);
+
+  EXPECT_DEATH_IF_SUPPORTED(
+      problem.SetParameterization(y, new IdentityParameterization(3)),
+      "Parameter block not found:");
+}
+
+TEST(Problem, RemoveParameterBlockWithUnknownPtrDies) {
+  double x[3];
+  double y[2];
+
+  Problem problem;
+  problem.AddParameterBlock(x, 3);
+
+  EXPECT_DEATH_IF_SUPPORTED(
+      problem.RemoveParameterBlock(y), "Parameter block not found:");
+}
+
+TEST(Problem, GetParameterization) {
+  double x[3];
+  double y[2];
+
+  Problem problem;
+  problem.AddParameterBlock(x, 3);
+  problem.AddParameterBlock(y, 2);
+
+  LocalParameterization* parameterization = new IdentityParameterization(3);
+  problem.SetParameterization(x, parameterization);
+  EXPECT_EQ(problem.GetParameterization(x), parameterization);
+  EXPECT_TRUE(problem.GetParameterization(y) == NULL);
+}
+
+TEST(Problem, ParameterBlockQueryTest) {
+  double x[3];
+  double y[4];
+  Problem problem;
+  problem.AddParameterBlock(x, 3);
+  problem.AddParameterBlock(y, 4);
+
+  vector<int> constant_parameters;
+  constant_parameters.push_back(0);
+  problem.SetParameterization(
+      x,
+      new SubsetParameterization(3, constant_parameters));
+  EXPECT_EQ(problem.ParameterBlockSize(x), 3);
+  EXPECT_EQ(problem.ParameterBlockLocalSize(x), 2);
+  EXPECT_EQ(problem.ParameterBlockLocalSize(y), 4);
+
+  vector<double*> parameter_blocks;
+  problem.GetParameterBlocks(&parameter_blocks);
+  EXPECT_EQ(parameter_blocks.size(), 2);
+  EXPECT_NE(parameter_blocks[0], parameter_blocks[1]);
+  EXPECT_TRUE(parameter_blocks[0] == x || parameter_blocks[0] == y);
+  EXPECT_TRUE(parameter_blocks[1] == x || parameter_blocks[1] == y);
+
+  EXPECT_TRUE(problem.HasParameterBlock(x));
+  problem.RemoveParameterBlock(x);
+  EXPECT_FALSE(problem.HasParameterBlock(x));
+  problem.GetParameterBlocks(&parameter_blocks);
+  EXPECT_EQ(parameter_blocks.size(), 1);
+  EXPECT_TRUE(parameter_blocks[0] == y);
+}
+
+TEST_P(DynamicProblem, RemoveParameterBlockWithNoResiduals) {
+  problem->AddParameterBlock(y, 4);
+  problem->AddParameterBlock(z, 5);
+  problem->AddParameterBlock(w, 3);
+  ASSERT_EQ(3, problem->NumParameterBlocks());
+  ASSERT_EQ(0, NumResidualBlocks());
+  EXPECT_EQ(y, GetParameterBlock(0)->user_state());
+  EXPECT_EQ(z, GetParameterBlock(1)->user_state());
+  EXPECT_EQ(w, GetParameterBlock(2)->user_state());
+
+  // w is at the end, which might break the swapping logic, so try adding and
+  // removing it.
+  problem->RemoveParameterBlock(w);
+  ASSERT_EQ(2, problem->NumParameterBlocks());
+  ASSERT_EQ(0, NumResidualBlocks());
+  EXPECT_EQ(y, GetParameterBlock(0)->user_state());
+  EXPECT_EQ(z, GetParameterBlock(1)->user_state());
+  problem->AddParameterBlock(w, 3);
+  ASSERT_EQ(3, problem->NumParameterBlocks());
+  ASSERT_EQ(0, NumResidualBlocks());
+  EXPECT_EQ(y, GetParameterBlock(0)->user_state());
+  EXPECT_EQ(z, GetParameterBlock(1)->user_state());
+  EXPECT_EQ(w, GetParameterBlock(2)->user_state());
+
+  // Now remove z, which is in the middle, and add it back.
+  problem->RemoveParameterBlock(z);
+  ASSERT_EQ(2, problem->NumParameterBlocks());
+  ASSERT_EQ(0, NumResidualBlocks());
+  EXPECT_EQ(y, GetParameterBlock(0)->user_state());
+  EXPECT_EQ(w, GetParameterBlock(1)->user_state());
+  problem->AddParameterBlock(z, 5);
+  ASSERT_EQ(3, problem->NumParameterBlocks());
+  ASSERT_EQ(0, NumResidualBlocks());
+  EXPECT_EQ(y, GetParameterBlock(0)->user_state());
+  EXPECT_EQ(w, GetParameterBlock(1)->user_state());
+  EXPECT_EQ(z, GetParameterBlock(2)->user_state());
+
+  // Now remove everything.
+  // y
+  problem->RemoveParameterBlock(y);
+  ASSERT_EQ(2, problem->NumParameterBlocks());
+  ASSERT_EQ(0, NumResidualBlocks());
+  EXPECT_EQ(z, GetParameterBlock(0)->user_state());
+  EXPECT_EQ(w, GetParameterBlock(1)->user_state());
+
+  // z
+  problem->RemoveParameterBlock(z);
+  ASSERT_EQ(1, problem->NumParameterBlocks());
+  ASSERT_EQ(0, NumResidualBlocks());
+  EXPECT_EQ(w, GetParameterBlock(0)->user_state());
+
+  // w
+  problem->RemoveParameterBlock(w);
+  EXPECT_EQ(0, problem->NumParameterBlocks());
+  EXPECT_EQ(0, NumResidualBlocks());
+}
+
+TEST_P(DynamicProblem, RemoveParameterBlockWithResiduals) {
+  problem->AddParameterBlock(y, 4);
+  problem->AddParameterBlock(z, 5);
+  problem->AddParameterBlock(w, 3);
+  ASSERT_EQ(3, problem->NumParameterBlocks());
+  ASSERT_EQ(0, NumResidualBlocks());
+  EXPECT_EQ(y, GetParameterBlock(0)->user_state());
+  EXPECT_EQ(z, GetParameterBlock(1)->user_state());
+  EXPECT_EQ(w, GetParameterBlock(2)->user_state());
+
+  // Add all combinations of cost functions.
+  CostFunction* cost_yzw = new TernaryCostFunction(1, 4, 5, 3);
+  CostFunction* cost_yz  = new BinaryCostFunction (1, 4, 5);
+  CostFunction* cost_yw  = new BinaryCostFunction (1, 4, 3);
+  CostFunction* cost_zw  = new BinaryCostFunction (1, 5, 3);
+  CostFunction* cost_y   = new UnaryCostFunction  (1, 4);
+  CostFunction* cost_z   = new UnaryCostFunction  (1, 5);
+  CostFunction* cost_w   = new UnaryCostFunction  (1, 3);
+
+  ResidualBlock* r_yzw = problem->AddResidualBlock(cost_yzw, NULL, y, z, w);
+  ResidualBlock* r_yz  = problem->AddResidualBlock(cost_yz,  NULL, y, z);
+  ResidualBlock* r_yw  = problem->AddResidualBlock(cost_yw,  NULL, y, w);
+  ResidualBlock* r_zw  = problem->AddResidualBlock(cost_zw,  NULL, z, w);
+  ResidualBlock* r_y   = problem->AddResidualBlock(cost_y,   NULL, y);
+  ResidualBlock* r_z   = problem->AddResidualBlock(cost_z,   NULL, z);
+  ResidualBlock* r_w   = problem->AddResidualBlock(cost_w,   NULL, w);
+
+  EXPECT_EQ(3, problem->NumParameterBlocks());
+  EXPECT_EQ(7, NumResidualBlocks());
+
+  // Remove w, which should remove r_yzw, r_yw, r_zw, r_w.
+  problem->RemoveParameterBlock(w);
+  ASSERT_EQ(2, problem->NumParameterBlocks());
+  ASSERT_EQ(3, NumResidualBlocks());
+
+  ASSERT_FALSE(HasResidualBlock(r_yzw));
+  ASSERT_TRUE (HasResidualBlock(r_yz ));
+  ASSERT_FALSE(HasResidualBlock(r_yw ));
+  ASSERT_FALSE(HasResidualBlock(r_zw ));
+  ASSERT_TRUE (HasResidualBlock(r_y  ));
+  ASSERT_TRUE (HasResidualBlock(r_z  ));
+  ASSERT_FALSE(HasResidualBlock(r_w  ));
+
+  // Remove z, which will remove almost everything else.
+  problem->RemoveParameterBlock(z);
+  ASSERT_EQ(1, problem->NumParameterBlocks());
+  ASSERT_EQ(1, NumResidualBlocks());
+
+  ASSERT_FALSE(HasResidualBlock(r_yzw));
+  ASSERT_FALSE(HasResidualBlock(r_yz ));
+  ASSERT_FALSE(HasResidualBlock(r_yw ));
+  ASSERT_FALSE(HasResidualBlock(r_zw ));
+  ASSERT_TRUE (HasResidualBlock(r_y  ));
+  ASSERT_FALSE(HasResidualBlock(r_z  ));
+  ASSERT_FALSE(HasResidualBlock(r_w  ));
+
+  // Remove y; all gone.
+  problem->RemoveParameterBlock(y);
+  EXPECT_EQ(0, problem->NumParameterBlocks());
+  EXPECT_EQ(0, NumResidualBlocks());
+}
+
+TEST_P(DynamicProblem, RemoveResidualBlock) {
+  problem->AddParameterBlock(y, 4);
+  problem->AddParameterBlock(z, 5);
+  problem->AddParameterBlock(w, 3);
+
+  // Add all combinations of cost functions.
+  CostFunction* cost_yzw = new TernaryCostFunction(1, 4, 5, 3);
+  CostFunction* cost_yz  = new BinaryCostFunction (1, 4, 5);
+  CostFunction* cost_yw  = new BinaryCostFunction (1, 4, 3);
+  CostFunction* cost_zw  = new BinaryCostFunction (1, 5, 3);
+  CostFunction* cost_y   = new UnaryCostFunction  (1, 4);
+  CostFunction* cost_z   = new UnaryCostFunction  (1, 5);
+  CostFunction* cost_w   = new UnaryCostFunction  (1, 3);
+
+  ResidualBlock* r_yzw = problem->AddResidualBlock(cost_yzw, NULL, y, z, w);
+  ResidualBlock* r_yz  = problem->AddResidualBlock(cost_yz,  NULL, y, z);
+  ResidualBlock* r_yw  = problem->AddResidualBlock(cost_yw,  NULL, y, w);
+  ResidualBlock* r_zw  = problem->AddResidualBlock(cost_zw,  NULL, z, w);
+  ResidualBlock* r_y   = problem->AddResidualBlock(cost_y,   NULL, y);
+  ResidualBlock* r_z   = problem->AddResidualBlock(cost_z,   NULL, z);
+  ResidualBlock* r_w   = problem->AddResidualBlock(cost_w,   NULL, w);
+
+  if (GetParam()) {
+    // In this test parameterization, there should be back-pointers from the
+    // parameter blocks to the residual blocks.
+    ExpectParameterBlockContains(y, r_yzw, r_yz, r_yw, r_y);
+    ExpectParameterBlockContains(z, r_yzw, r_yz, r_zw, r_z);
+    ExpectParameterBlockContains(w, r_yzw, r_yw, r_zw, r_w);
+  } else {
+    // Otherwise, nothing.
+    EXPECT_TRUE(GetParameterBlock(0)->mutable_residual_blocks() == NULL);
+    EXPECT_TRUE(GetParameterBlock(1)->mutable_residual_blocks() == NULL);
+    EXPECT_TRUE(GetParameterBlock(2)->mutable_residual_blocks() == NULL);
+  }
+  EXPECT_EQ(3, problem->NumParameterBlocks());
+  EXPECT_EQ(7, NumResidualBlocks());
+
+  // Remove each residual and check the state after each removal.
+
+  // Remove r_yzw.
+  problem->RemoveResidualBlock(r_yzw);
+  ASSERT_EQ(3, problem->NumParameterBlocks());
+  ASSERT_EQ(6, NumResidualBlocks());
+  if (GetParam()) {
+    ExpectParameterBlockContains(y, r_yz, r_yw, r_y);
+    ExpectParameterBlockContains(z, r_yz, r_zw, r_z);
+    ExpectParameterBlockContains(w, r_yw, r_zw, r_w);
+  }
+  ASSERT_TRUE (HasResidualBlock(r_yz ));
+  ASSERT_TRUE (HasResidualBlock(r_yw ));
+  ASSERT_TRUE (HasResidualBlock(r_zw ));
+  ASSERT_TRUE (HasResidualBlock(r_y  ));
+  ASSERT_TRUE (HasResidualBlock(r_z  ));
+  ASSERT_TRUE (HasResidualBlock(r_w  ));
+
+  // Remove r_yw.
+  problem->RemoveResidualBlock(r_yw);
+  ASSERT_EQ(3, problem->NumParameterBlocks());
+  ASSERT_EQ(5, NumResidualBlocks());
+  if (GetParam()) {
+    ExpectParameterBlockContains(y, r_yz, r_y);
+    ExpectParameterBlockContains(z, r_yz, r_zw, r_z);
+    ExpectParameterBlockContains(w, r_zw, r_w);
+  }
+  ASSERT_TRUE (HasResidualBlock(r_yz ));
+  ASSERT_TRUE (HasResidualBlock(r_zw ));
+  ASSERT_TRUE (HasResidualBlock(r_y  ));
+  ASSERT_TRUE (HasResidualBlock(r_z  ));
+  ASSERT_TRUE (HasResidualBlock(r_w  ));
+
+  // Remove r_zw.
+  problem->RemoveResidualBlock(r_zw);
+  ASSERT_EQ(3, problem->NumParameterBlocks());
+  ASSERT_EQ(4, NumResidualBlocks());
+  if (GetParam()) {
+    ExpectParameterBlockContains(y, r_yz, r_y);
+    ExpectParameterBlockContains(z, r_yz, r_z);
+    ExpectParameterBlockContains(w, r_w);
+  }
+  ASSERT_TRUE (HasResidualBlock(r_yz ));
+  ASSERT_TRUE (HasResidualBlock(r_y  ));
+  ASSERT_TRUE (HasResidualBlock(r_z  ));
+  ASSERT_TRUE (HasResidualBlock(r_w  ));
+
+  // Remove r_w.
+  problem->RemoveResidualBlock(r_w);
+  ASSERT_EQ(3, problem->NumParameterBlocks());
+  ASSERT_EQ(3, NumResidualBlocks());
+  if (GetParam()) {
+    ExpectParameterBlockContains(y, r_yz, r_y);
+    ExpectParameterBlockContains(z, r_yz, r_z);
+    ExpectParameterBlockContains(w);
+  }
+  ASSERT_TRUE (HasResidualBlock(r_yz ));
+  ASSERT_TRUE (HasResidualBlock(r_y  ));
+  ASSERT_TRUE (HasResidualBlock(r_z  ));
+
+  // Remove r_yz.
+  problem->RemoveResidualBlock(r_yz);
+  ASSERT_EQ(3, problem->NumParameterBlocks());
+  ASSERT_EQ(2, NumResidualBlocks());
+  if (GetParam()) {
+    ExpectParameterBlockContains(y, r_y);
+    ExpectParameterBlockContains(z, r_z);
+    ExpectParameterBlockContains(w);
+  }
+  ASSERT_TRUE (HasResidualBlock(r_y  ));
+  ASSERT_TRUE (HasResidualBlock(r_z  ));
+
+  // Remove the last two.
+  problem->RemoveResidualBlock(r_z);
+  problem->RemoveResidualBlock(r_y);
+  ASSERT_EQ(3, problem->NumParameterBlocks());
+  ASSERT_EQ(0, NumResidualBlocks());
+  if (GetParam()) {
+    ExpectParameterBlockContains(y);
+    ExpectParameterBlockContains(z);
+    ExpectParameterBlockContains(w);
+  }
+}
+
+TEST_P(DynamicProblem, RemoveInvalidResidualBlockDies) {
+  problem->AddParameterBlock(y, 4);
+  problem->AddParameterBlock(z, 5);
+  problem->AddParameterBlock(w, 3);
+
+  // Add all combinations of cost functions.
+  CostFunction* cost_yzw = new TernaryCostFunction(1, 4, 5, 3);
+  CostFunction* cost_yz  = new BinaryCostFunction (1, 4, 5);
+  CostFunction* cost_yw  = new BinaryCostFunction (1, 4, 3);
+  CostFunction* cost_zw  = new BinaryCostFunction (1, 5, 3);
+  CostFunction* cost_y   = new UnaryCostFunction  (1, 4);
+  CostFunction* cost_z   = new UnaryCostFunction  (1, 5);
+  CostFunction* cost_w   = new UnaryCostFunction  (1, 3);
+
+  ResidualBlock* r_yzw = problem->AddResidualBlock(cost_yzw, NULL, y, z, w);
+  ResidualBlock* r_yz  = problem->AddResidualBlock(cost_yz,  NULL, y, z);
+  ResidualBlock* r_yw  = problem->AddResidualBlock(cost_yw,  NULL, y, w);
+  ResidualBlock* r_zw  = problem->AddResidualBlock(cost_zw,  NULL, z, w);
+  ResidualBlock* r_y   = problem->AddResidualBlock(cost_y,   NULL, y);
+  ResidualBlock* r_z   = problem->AddResidualBlock(cost_z,   NULL, z);
+  ResidualBlock* r_w   = problem->AddResidualBlock(cost_w,   NULL, w);
+
+  // Remove r_yzw.
+  problem->RemoveResidualBlock(r_yzw);
+  ASSERT_EQ(3, problem->NumParameterBlocks());
+  ASSERT_EQ(6, NumResidualBlocks());
+  // Attempt to remove r_yzw again.
+  EXPECT_DEATH_IF_SUPPORTED(problem->RemoveResidualBlock(r_yzw), "not found");
+
+  // Attempt to remove a cast pointer never added as a residual.
+  int trash_memory = 1234;
+  ResidualBlock* invalid_residual =
+      reinterpret_cast<ResidualBlock*>(&trash_memory);
+  EXPECT_DEATH_IF_SUPPORTED(problem->RemoveResidualBlock(invalid_residual),
+                            "not found");
+
+  // Remove a parameter block, which in turn removes the dependent residuals;
+  // then attempt to remove them directly.
+  problem->RemoveParameterBlock(z);
+  ASSERT_EQ(2, problem->NumParameterBlocks());
+  ASSERT_EQ(3, NumResidualBlocks());
+  EXPECT_DEATH_IF_SUPPORTED(problem->RemoveResidualBlock(r_yz), "not found");
+  EXPECT_DEATH_IF_SUPPORTED(problem->RemoveResidualBlock(r_zw), "not found");
+  EXPECT_DEATH_IF_SUPPORTED(problem->RemoveResidualBlock(r_z), "not found");
+
+  problem->RemoveResidualBlock(r_yw);
+  problem->RemoveResidualBlock(r_w);
+  problem->RemoveResidualBlock(r_y);
+}
+
+// Check that a null-terminated array, a, has the same elements as b.
+template<typename T>
+void ExpectVectorContainsUnordered(const T* a, const vector<T>& b) {
+  // Compute the size of a.
+  int size = 0;
+  while (a[size]) {
+    ++size;
+  }
+  ASSERT_EQ(size, b.size());
+
+  // Sort a.
+  vector<T> a_sorted(size);
+  copy(a, a + size, a_sorted.begin());
+  sort(a_sorted.begin(), a_sorted.end());
+
+  // Sort b.
+  vector<T> b_sorted(b);
+  sort(b_sorted.begin(), b_sorted.end());
+
+  // Compare.
+  for (int i = 0; i < size; ++i) {
+    EXPECT_EQ(a_sorted[i], b_sorted[i]);
+  }
+}
+
+void ExpectProblemHasResidualBlocks(
+    const ProblemImpl &problem,
+    const ResidualBlockId *expected_residual_blocks) {
+  vector<ResidualBlockId> residual_blocks;
+  problem.GetResidualBlocks(&residual_blocks);
+  ExpectVectorContainsUnordered(expected_residual_blocks, residual_blocks);
+}
+
+TEST_P(DynamicProblem, GetXXXBlocksForYYYBlock) {
+  problem->AddParameterBlock(y, 4);
+  problem->AddParameterBlock(z, 5);
+  problem->AddParameterBlock(w, 3);
+
+  // Add all combinations of cost functions.
+  CostFunction* cost_yzw = new TernaryCostFunction(1, 4, 5, 3);
+  CostFunction* cost_yz  = new BinaryCostFunction (1, 4, 5);
+  CostFunction* cost_yw  = new BinaryCostFunction (1, 4, 3);
+  CostFunction* cost_zw  = new BinaryCostFunction (1, 5, 3);
+  CostFunction* cost_y   = new UnaryCostFunction  (1, 4);
+  CostFunction* cost_z   = new UnaryCostFunction  (1, 5);
+  CostFunction* cost_w   = new UnaryCostFunction  (1, 3);
+
+  ResidualBlock* r_yzw = problem->AddResidualBlock(cost_yzw, NULL, y, z, w);
+  {
+    ResidualBlockId expected_residuals[] = {r_yzw, 0};
+    ExpectProblemHasResidualBlocks(*problem, expected_residuals);
+  }
+  ResidualBlock* r_yz  = problem->AddResidualBlock(cost_yz,  NULL, y, z);
+  {
+    ResidualBlockId expected_residuals[] = {r_yzw, r_yz, 0};
+    ExpectProblemHasResidualBlocks(*problem, expected_residuals);
+  }
+  ResidualBlock* r_yw  = problem->AddResidualBlock(cost_yw,  NULL, y, w);
+  {
+    ResidualBlock *expected_residuals[] = {r_yzw, r_yz, r_yw, 0};
+    ExpectProblemHasResidualBlocks(*problem, expected_residuals);
+  }
+  ResidualBlock* r_zw  = problem->AddResidualBlock(cost_zw,  NULL, z, w);
+  {
+    ResidualBlock *expected_residuals[] = {r_yzw, r_yz, r_yw, r_zw, 0};
+    ExpectProblemHasResidualBlocks(*problem, expected_residuals);
+  }
+  ResidualBlock* r_y   = problem->AddResidualBlock(cost_y,   NULL, y);
+  {
+    ResidualBlock *expected_residuals[] = {r_yzw, r_yz, r_yw, r_zw, r_y, 0};
+    ExpectProblemHasResidualBlocks(*problem, expected_residuals);
+  }
+  ResidualBlock* r_z   = problem->AddResidualBlock(cost_z,   NULL, z);
+  {
+    ResidualBlock *expected_residuals[] = {
+      r_yzw, r_yz, r_yw, r_zw, r_y, r_z, 0
+    };
+    ExpectProblemHasResidualBlocks(*problem, expected_residuals);
+  }
+  ResidualBlock* r_w   = problem->AddResidualBlock(cost_w,   NULL, w);
+  {
+    ResidualBlock *expected_residuals[] = {
+      r_yzw, r_yz, r_yw, r_zw, r_y, r_z, r_w, 0
+    };
+    ExpectProblemHasResidualBlocks(*problem, expected_residuals);
+  }
+
+  vector<double*> parameter_blocks;
+  vector<ResidualBlockId> residual_blocks;
+
+  // Check GetResidualBlocksForParameterBlock() for all parameter blocks.
+  struct GetResidualBlocksForParameterBlockTestCase {
+    double* parameter_block;
+    ResidualBlockId expected_residual_blocks[10];
+  };
+  GetResidualBlocksForParameterBlockTestCase get_residual_blocks_cases[] = {
+    { y, { r_yzw, r_yz, r_yw, r_y, NULL} },
+    { z, { r_yzw, r_yz, r_zw, r_z, NULL} },
+    { w, { r_yzw, r_yw, r_zw, r_w, NULL} },
+    { NULL }
+  };
+  for (int i = 0; get_residual_blocks_cases[i].parameter_block; ++i) {
+    problem->GetResidualBlocksForParameterBlock(
+        get_residual_blocks_cases[i].parameter_block,
+        &residual_blocks);
+    ExpectVectorContainsUnordered(
+        get_residual_blocks_cases[i].expected_residual_blocks,
+        residual_blocks);
+  }
+
+  // Check GetParameterBlocksForResidualBlock() for all residual blocks.
+  struct GetParameterBlocksForResidualBlockTestCase {
+    ResidualBlockId residual_block;
+    double* expected_parameter_blocks[10];
+  };
+  GetParameterBlocksForResidualBlockTestCase get_parameter_blocks_cases[] = {
+    { r_yzw, { y, z, w, NULL } },
+    { r_yz , { y, z, NULL } },
+    { r_yw , { y, w, NULL } },
+    { r_zw , { z, w, NULL } },
+    { r_y  , { y, NULL } },
+    { r_z  , { z, NULL } },
+    { r_w  , { w, NULL } },
+    { NULL }
+  };
+  for (int i = 0; get_parameter_blocks_cases[i].residual_block; ++i) {
+    problem->GetParameterBlocksForResidualBlock(
+        get_parameter_blocks_cases[i].residual_block,
+        &parameter_blocks);
+    ExpectVectorContainsUnordered(
+        get_parameter_blocks_cases[i].expected_parameter_blocks,
+        parameter_blocks);
+  }
+}
+
+INSTANTIATE_TEST_CASE_P(OptionsInstantiation,
+                        DynamicProblem,
+                        ::testing::Values(true, false));
+
+// Test for Problem::Evaluate
+
+// r_i = i - sum_j (j + 1) * x_ij^2
+template <int kNumResiduals, int kNumParameterBlocks>
+class QuadraticCostFunction : public CostFunction {
+ public:
+  QuadraticCostFunction() {
+    CHECK_GT(kNumResiduals, 0);
+    CHECK_GT(kNumParameterBlocks, 0);
+    set_num_residuals(kNumResiduals);
+    for (int i = 0; i < kNumParameterBlocks; ++i) {
+      mutable_parameter_block_sizes()->push_back(kNumResiduals);
+    }
+  }
+
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** jacobians) const {
+    for (int i = 0; i < kNumResiduals; ++i) {
+      residuals[i] = i;
+      for (int j = 0; j < kNumParameterBlocks; ++j) {
+        residuals[i] -= (j + 1.0) * parameters[j][i] * parameters[j][i];
+      }
+    }
+
+    if (jacobians == NULL) {
+      return true;
+    }
+
+    for (int j = 0; j < kNumParameterBlocks; ++j) {
+      if (jacobians[j] != NULL) {
+        MatrixRef(jacobians[j], kNumResiduals, kNumResiduals) =
+            (-2.0 * (j + 1.0) *
+             ConstVectorRef(parameters[j], kNumResiduals)).asDiagonal();
+      }
+    }
+
+    return true;
+  }
+};
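+
+// Since residual i depends on parameter block j only through its i-th entry,
+// d r_i / d x_ji = -2 (j + 1) x_ji; each Jacobian block is therefore the
+// diagonal matrix that Evaluate() fills in above.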
+
+// Convert a CRSMatrix to a dense Eigen matrix.
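+// (rows[r] .. rows[r + 1] delimit the entries of cols/values belonging to
+// row r.)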
+void CRSToDenseMatrix(const CRSMatrix& input, Matrix* output) {
+  CHECK(output != nullptr);
+  Matrix& m = *output;
+  m.resize(input.num_rows, input.num_cols);
+  m.setZero();
+  for (int row = 0; row < input.num_rows; ++row) {
+    for (int j = input.rows[row]; j < input.rows[row + 1]; ++j) {
+      const int col = input.cols[j];
+      m(row, col) = input.values[j];
+    }
+  }
+}
+
+class ProblemEvaluateTest : public ::testing::Test {
+ protected:
+  void SetUp() {
+    for (int i = 0; i < 6; ++i) {
+      parameters_[i] = static_cast<double>(i + 1);
+    }
+
+    parameter_blocks_.push_back(parameters_);
+    parameter_blocks_.push_back(parameters_ + 2);
+    parameter_blocks_.push_back(parameters_ + 4);
+
+
+    CostFunction* cost_function = new QuadraticCostFunction<2, 2>;
+
+    // f(x, y)
+    residual_blocks_.push_back(
+        problem_.AddResidualBlock(cost_function,
+                                  NULL,
+                                  parameters_,
+                                  parameters_ + 2));
+    // g(y, z)
+    residual_blocks_.push_back(
+        problem_.AddResidualBlock(cost_function,
+                                  NULL, parameters_ + 2,
+                                  parameters_ + 4));
+    // h(z, x)
+    residual_blocks_.push_back(
+        problem_.AddResidualBlock(cost_function,
+                                  NULL,
+                                  parameters_ + 4,
+                                  parameters_));
+  }
+
+  void TearDown() {
+    EXPECT_TRUE(problem_.program().IsValid());
+  }
+
+  void EvaluateAndCompare(const Problem::EvaluateOptions& options,
+                          const int expected_num_rows,
+                          const int expected_num_cols,
+                          const double expected_cost,
+                          const double* expected_residuals,
+                          const double* expected_gradient,
+                          const double* expected_jacobian) {
+    double cost;
+    vector<double> residuals;
+    vector<double> gradient;
+    CRSMatrix jacobian;
+
+    EXPECT_TRUE(
+        problem_.Evaluate(options,
+                          &cost,
+                          expected_residuals != NULL ? &residuals : NULL,
+                          expected_gradient != NULL ? &gradient : NULL,
+                          expected_jacobian != NULL ? &jacobian : NULL));
+
+    if (expected_residuals != NULL) {
+      EXPECT_EQ(residuals.size(), expected_num_rows);
+    }
+
+    if (expected_gradient != NULL) {
+      EXPECT_EQ(gradient.size(), expected_num_cols);
+    }
+
+    if (expected_jacobian != NULL) {
+      EXPECT_EQ(jacobian.num_rows, expected_num_rows);
+      EXPECT_EQ(jacobian.num_cols, expected_num_cols);
+    }
+
+    Matrix dense_jacobian;
+    if (expected_jacobian != NULL) {
+      CRSToDenseMatrix(jacobian, &dense_jacobian);
+    }
+
+    CompareEvaluations(expected_num_rows,
+                       expected_num_cols,
+                       expected_cost,
+                       expected_residuals,
+                       expected_gradient,
+                       expected_jacobian,
+                       cost,
+                       residuals.size() > 0 ? &residuals[0] : NULL,
+                       gradient.size() > 0 ? &gradient[0] : NULL,
+                       dense_jacobian.data());
+  }
+
+  void CheckAllEvaluationCombinations(const Problem::EvaluateOptions& options,
+                                      const ExpectedEvaluation& expected) {
+    for (int i = 0; i < 8; ++i) {
+      EvaluateAndCompare(options,
+                         expected.num_rows,
+                         expected.num_cols,
+                         expected.cost,
+                         (i & 1) ? expected.residuals : NULL,
+                         (i & 2) ? expected.gradient  : NULL,
+                         (i & 4) ? expected.jacobian  : NULL);
+    }
+  }
+
+  ProblemImpl problem_;
+  double parameters_[6];
+  vector<double*> parameter_blocks_;
+  vector<ResidualBlockId> residual_blocks_;
+};
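+
+// A worked check of the expected numbers below, using the values set in
+// SetUp() (parameters_ = {1, 2, 3, 4, 5, 6}, i.e. x = (1, 2), y = (3, 4),
+// z = (5, 6)) and the residual definition of QuadraticCostFunction:
+//
+//   f(x, y): r_0 = 0 - (1 * 1^2 + 2 * 3^2) = -19
+//            r_1 = 1 - (1 * 2^2 + 2 * 4^2) = -35
+//   g(y, z): r_0 = 0 - (1 * 3^2 + 2 * 5^2) = -59
+//            r_1 = 1 - (1 * 4^2 + 2 * 6^2) = -87
+//   h(z, x): r_0 = 0 - (1 * 5^2 + 2 * 1^2) = -27
+//            r_1 = 1 - (1 * 6^2 + 2 * 2^2) = -43
+//
+//   cost = 0.5 * ||r||^2
+//        = 0.5 * (19^2 + 35^2 + 59^2 + 87^2 + 27^2 + 43^2) = 0.5 * 15214
+//        = 7607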
+
+
+TEST_F(ProblemEvaluateTest, MultipleParameterAndResidualBlocks) {
+  ExpectedEvaluation expected = {
+    // Rows/columns
+    6, 6,
+    // Cost
+    7607.0,
+    // Residuals
+    { -19.0, -35.0,  // f
+      -59.0, -87.0,  // g
+      -27.0, -43.0   // h
+    },
+    // Gradient
+    {  146.0,  484.0,   // x
+       582.0, 1256.0,   // y
+      1450.0, 2604.0,   // z
+    },
+    // Jacobian
+    //                       x             y             z
+    { /* f(x, y) */ -2.0,  0.0, -12.0,   0.0,   0.0,   0.0,
+                     0.0, -4.0,   0.0, -16.0,   0.0,   0.0,
+      /* g(y, z) */  0.0,  0.0,  -6.0,   0.0, -20.0,   0.0,
+                     0.0,  0.0,   0.0,  -8.0,   0.0, -24.0,
+      /* h(z, x) */ -4.0,  0.0,   0.0,   0.0, -10.0,   0.0,
+                     0.0, -8.0,   0.0,   0.0,   0.0, -12.0
+    }
+  };
+
+  CheckAllEvaluationCombinations(Problem::EvaluateOptions(), expected);
+}
+
+TEST_F(ProblemEvaluateTest, ParameterAndResidualBlocksPassedInOptions) {
+  ExpectedEvaluation expected = {
+    // Rows/columns
+    6, 6,
+    // Cost
+    7607.0,
+    // Residuals
+    { -19.0, -35.0,  // f
+      -59.0, -87.0,  // g
+      -27.0, -43.0   // h
+    },
+    // Gradient
+    {  146.0,  484.0,   // x
+       582.0, 1256.0,   // y
+      1450.0, 2604.0,   // z
+    },
+    // Jacobian
+    //                       x             y             z
+    { /* f(x, y) */ -2.0,  0.0, -12.0,   0.0,   0.0,   0.0,
+                     0.0, -4.0,   0.0, -16.0,   0.0,   0.0,
+      /* g(y, z) */  0.0,  0.0,  -6.0,   0.0, -20.0,   0.0,
+                     0.0,  0.0,   0.0,  -8.0,   0.0, -24.0,
+      /* h(z, x) */ -4.0,  0.0,   0.0,   0.0, -10.0,   0.0,
+                     0.0, -8.0,   0.0,   0.0,   0.0, -12.0
+    }
+  };
+
+  Problem::EvaluateOptions evaluate_options;
+  evaluate_options.parameter_blocks = parameter_blocks_;
+  evaluate_options.residual_blocks = residual_blocks_;
+  CheckAllEvaluationCombinations(evaluate_options, expected);
+}
+
+TEST_F(ProblemEvaluateTest, ReorderedResidualBlocks) {
+  ExpectedEvaluation expected = {
+    // Rows/columns
+    6, 6,
+    // Cost
+    7607.0,
+    // Residuals
+    { -19.0, -35.0,  // f
+      -27.0, -43.0,  // h
+      -59.0, -87.0   // g
+    },
+    // Gradient
+    {  146.0,  484.0,   // x
+       582.0, 1256.0,   // y
+      1450.0, 2604.0,   // z
+    },
+    // Jacobian
+    //                       x             y             z
+    { /* f(x, y) */ -2.0,  0.0, -12.0,   0.0,   0.0,   0.0,
+                     0.0, -4.0,   0.0, -16.0,   0.0,   0.0,
+      /* h(z, x) */ -4.0,  0.0,   0.0,   0.0, -10.0,   0.0,
+                     0.0, -8.0,   0.0,   0.0,   0.0, -12.0,
+      /* g(y, z) */  0.0,  0.0,  -6.0,   0.0, -20.0,   0.0,
+                     0.0,  0.0,   0.0,  -8.0,   0.0, -24.0
+    }
+  };
+
+  Problem::EvaluateOptions evaluate_options;
+  evaluate_options.parameter_blocks = parameter_blocks_;
+
+  // f, h, g
+  evaluate_options.residual_blocks.push_back(residual_blocks_[0]);
+  evaluate_options.residual_blocks.push_back(residual_blocks_[2]);
+  evaluate_options.residual_blocks.push_back(residual_blocks_[1]);
+
+  CheckAllEvaluationCombinations(evaluate_options, expected);
+}
+
+TEST_F(ProblemEvaluateTest, ReorderedResidualBlocksAndReorderedParameterBlocks) {
+  ExpectedEvaluation expected = {
+    // Rows/columns
+    6, 6,
+    // Cost
+    7607.0,
+    // Residuals
+    { -19.0, -35.0,  // f
+      -27.0, -43.0,  // h
+      -59.0, -87.0   // g
+    },
+    // Gradient
+    {  1450.0, 2604.0,   // z
+        582.0, 1256.0,   // y
+        146.0,  484.0,   // x
+    },
+    // Jacobian
+    //                       z             y             x
+    { /* f(x, y) */   0.0,   0.0, -12.0,   0.0,  -2.0,   0.0,
+                      0.0,   0.0,   0.0, -16.0,   0.0,  -4.0,
+      /* h(z, x) */ -10.0,   0.0,   0.0,   0.0,  -4.0,   0.0,
+                      0.0, -12.0,   0.0,   0.0,   0.0,  -8.0,
+      /* g(y, z) */ -20.0,   0.0,  -6.0,   0.0,   0.0,   0.0,
+                      0.0, -24.0,   0.0,  -8.0,   0.0,   0.0
+    }
+  };
+
+  Problem::EvaluateOptions evaluate_options;
+  // z, y, x
+  evaluate_options.parameter_blocks.push_back(parameter_blocks_[2]);
+  evaluate_options.parameter_blocks.push_back(parameter_blocks_[1]);
+  evaluate_options.parameter_blocks.push_back(parameter_blocks_[0]);
+
+  // f, h, g
+  evaluate_options.residual_blocks.push_back(residual_blocks_[0]);
+  evaluate_options.residual_blocks.push_back(residual_blocks_[2]);
+  evaluate_options.residual_blocks.push_back(residual_blocks_[1]);
+
+  CheckAllEvaluationCombinations(evaluate_options, expected);
+}
+
+TEST_F(ProblemEvaluateTest, ConstantParameterBlock) {
+  ExpectedEvaluation expected = {
+    // Rows/columns
+    6, 6,
+    // Cost
+    7607.0,
+    // Residuals
+    { -19.0, -35.0,  // f
+      -59.0, -87.0,  // g
+      -27.0, -43.0   // h
+    },
+
+    // Gradient
+    {  146.0,  484.0,  // x
+         0.0,    0.0,  // y
+      1450.0, 2604.0,  // z
+    },
+
+    // Jacobian
+    //                       x             y             z
+    { /* f(x, y) */ -2.0,  0.0,   0.0,   0.0,   0.0,   0.0,
+                     0.0, -4.0,   0.0,   0.0,   0.0,   0.0,
+      /* g(y, z) */  0.0,  0.0,   0.0,   0.0, -20.0,   0.0,
+                     0.0,  0.0,   0.0,   0.0,   0.0, -24.0,
+      /* h(z, x) */ -4.0,  0.0,   0.0,   0.0, -10.0,   0.0,
+                     0.0, -8.0,   0.0,   0.0,   0.0, -12.0
+    }
+  };
+
+  problem_.SetParameterBlockConstant(parameters_ + 2);
+  CheckAllEvaluationCombinations(Problem::EvaluateOptions(), expected);
+}
+
+TEST_F(ProblemEvaluateTest, ExcludedAResidualBlock) {
+  ExpectedEvaluation expected = {
+    // Rows/columns
+    4, 6,
+    // Cost
+    2082.0,
+    // Residuals
+    { -19.0, -35.0,  // f
+      -27.0, -43.0   // h
+    },
+    // Gradient
+    {  146.0,  484.0,   // x
+       228.0,  560.0,   // y
+       270.0,  516.0,   // z
+    },
+    // Jacobian
+    //                       x             y             z
+    { /* f(x, y) */ -2.0,  0.0, -12.0,   0.0,   0.0,   0.0,
+                     0.0, -4.0,   0.0, -16.0,   0.0,   0.0,
+      /* h(z, x) */ -4.0,  0.0,   0.0,   0.0, -10.0,   0.0,
+                     0.0, -8.0,   0.0,   0.0,   0.0, -12.0
+    }
+  };
+
+  Problem::EvaluateOptions evaluate_options;
+  evaluate_options.residual_blocks.push_back(residual_blocks_[0]);
+  evaluate_options.residual_blocks.push_back(residual_blocks_[2]);
+
+  CheckAllEvaluationCombinations(evaluate_options, expected);
+}
+
+TEST_F(ProblemEvaluateTest, ExcludedParameterBlock) {
+  ExpectedEvaluation expected = {
+    // Rows/columns
+    6, 4,
+    // Cost
+    7607.0,
+    // Residuals
+    { -19.0, -35.0,  // f
+      -59.0, -87.0,  // g
+      -27.0, -43.0   // h
+    },
+
+    // Gradient
+    {  146.0,  484.0,  // x
+      1450.0, 2604.0,  // z
+    },
+
+    // Jacobian
+    //                       x             z
+    { /* f(x, y) */ -2.0,  0.0,   0.0,   0.0,
+                     0.0, -4.0,   0.0,   0.0,
+      /* g(y, z) */  0.0,  0.0, -20.0,   0.0,
+                     0.0,  0.0,   0.0, -24.0,
+      /* h(z, x) */ -4.0,  0.0, -10.0,   0.0,
+                     0.0, -8.0,   0.0, -12.0
+    }
+  };
+
+  Problem::EvaluateOptions evaluate_options;
+  // x, z
+  evaluate_options.parameter_blocks.push_back(parameter_blocks_[0]);
+  evaluate_options.parameter_blocks.push_back(parameter_blocks_[2]);
+  evaluate_options.residual_blocks = residual_blocks_;
+  CheckAllEvaluationCombinations(evaluate_options, expected);
+}
+
+TEST_F(ProblemEvaluateTest, ExcludedParameterBlockAndExcludedResidualBlock) {
+  ExpectedEvaluation expected = {
+    // Rows/columns
+    4, 4,
+    // Cost
+    6318.0,
+    // Residuals
+    { -19.0, -35.0,  // f
+      -59.0, -87.0,  // g
+    },
+
+    // Gradient
+    {   38.0,  140.0,  // x
+      1180.0, 2088.0,  // z
+    },
+
+    // Jacobian
+    //                       x             z
+    { /* f(x, y) */ -2.0,  0.0,   0.0,   0.0,
+                     0.0, -4.0,   0.0,   0.0,
+      /* g(y, z) */  0.0,  0.0, -20.0,   0.0,
+                     0.0,  0.0,   0.0, -24.0,
+    }
+  };
+
+  Problem::EvaluateOptions evaluate_options;
+  // x, z
+  evaluate_options.parameter_blocks.push_back(parameter_blocks_[0]);
+  evaluate_options.parameter_blocks.push_back(parameter_blocks_[2]);
+  evaluate_options.residual_blocks.push_back(residual_blocks_[0]);
+  evaluate_options.residual_blocks.push_back(residual_blocks_[1]);
+
+  CheckAllEvaluationCombinations(evaluate_options, expected);
+}
+
+TEST_F(ProblemEvaluateTest, LocalParameterization) {
+  ExpectedEvaluation expected = {
+    // Rows/columns
+    6, 5,
+    // Cost
+    7607.0,
+    // Residuals
+    { -19.0, -35.0,  // f
+      -59.0, -87.0,  // g
+      -27.0, -43.0   // h
+    },
+    // Gradient
+    {  146.0,  484.0,  // x
+      1256.0,          // y with SubsetParameterization
+      1450.0, 2604.0,  // z
+    },
+    // Jacobian
+    //                       x      y             z
+    { /* f(x, y) */ -2.0,  0.0,   0.0,   0.0,   0.0,
+                     0.0, -4.0, -16.0,   0.0,   0.0,
+      /* g(y, z) */  0.0,  0.0,   0.0, -20.0,   0.0,
+                     0.0,  0.0,  -8.0,   0.0, -24.0,
+      /* h(z, x) */ -4.0,  0.0,   0.0, -10.0,   0.0,
+                     0.0, -8.0,   0.0,   0.0, -12.0
+    }
+  };
+
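+  // Hold the first coordinate of the y block constant via a
+  // SubsetParameterization, so the jacobian has 5 columns and y contributes a
+  // single gradient entry.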
+  vector<int> constant_parameters;
+  constant_parameters.push_back(0);
+  problem_.SetParameterization(parameters_ + 2,
+                               new SubsetParameterization(2,
+                                                          constant_parameters));
+
+  CheckAllEvaluationCombinations(Problem::EvaluateOptions(), expected);
+}
+
+TEST(Problem, SetAndGetParameterLowerBound) {
+  Problem problem;
+  double x[] = {1.0, 2.0};
+  problem.AddParameterBlock(x, 2);
+
+  EXPECT_EQ(problem.GetParameterLowerBound(x, 0),
+            -std::numeric_limits<double>::max());
+  EXPECT_EQ(problem.GetParameterLowerBound(x, 1),
+            -std::numeric_limits<double>::max());
+
+  problem.SetParameterLowerBound(x, 0, -1.0);
+  EXPECT_EQ(problem.GetParameterLowerBound(x, 0), -1.0);
+  EXPECT_EQ(problem.GetParameterLowerBound(x, 1),
+            -std::numeric_limits<double>::max());
+
+  problem.SetParameterLowerBound(x, 0, -2.0);
+  EXPECT_EQ(problem.GetParameterLowerBound(x, 0), -2.0);
+  EXPECT_EQ(problem.GetParameterLowerBound(x, 1),
+            -std::numeric_limits<double>::max());
+
+  problem.SetParameterLowerBound(x, 0, -std::numeric_limits<double>::max());
+  EXPECT_EQ(problem.GetParameterLowerBound(x, 0),
+            -std::numeric_limits<double>::max());
+  EXPECT_EQ(problem.GetParameterLowerBound(x, 1),
+            -std::numeric_limits<double>::max());
+}
+
+TEST(Problem, SetAndGetParameterUpperBound) {
+  Problem problem;
+  double x[] = {1.0, 2.0};
+  problem.AddParameterBlock(x, 2);
+
+  EXPECT_EQ(problem.GetParameterUpperBound(x, 0),
+            std::numeric_limits<double>::max());
+  EXPECT_EQ(problem.GetParameterUpperBound(x, 1),
+            std::numeric_limits<double>::max());
+
+  problem.SetParameterUpperBound(x, 0, -1.0);
+  EXPECT_EQ(problem.GetParameterUpperBound(x, 0), -1.0);
+  EXPECT_EQ(problem.GetParameterUpperBound(x, 1),
+            std::numeric_limits<double>::max());
+
+  problem.SetParameterUpperBound(x, 0, -2.0);
+  EXPECT_EQ(problem.GetParameterUpperBound(x, 0), -2.0);
+  EXPECT_EQ(problem.GetParameterUpperBound(x, 1),
+            std::numeric_limits<double>::max());
+
+  problem.SetParameterUpperBound(x, 0, std::numeric_limits<double>::max());
+  EXPECT_EQ(problem.GetParameterUpperBound(x, 0),
+            std::numeric_limits<double>::max());
+  EXPECT_EQ(problem.GetParameterUpperBound(x, 1),
+            std::numeric_limits<double>::max());
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/program.cc b/internal/ceres/program.cc
new file mode 100644
index 0000000..f1cd1bb
--- /dev/null
+++ b/internal/ceres/program.cc
@@ -0,0 +1,526 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include "ceres/program.h"
+
+#include <algorithm>
+#include <map>
+#include <memory>
+#include <vector>
+
+#include "ceres/array_utils.h"
+#include "ceres/casts.h"
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/cost_function.h"
+#include "ceres/evaluator.h"
+#include "ceres/internal/port.h"
+#include "ceres/local_parameterization.h"
+#include "ceres/loss_function.h"
+#include "ceres/map_util.h"
+#include "ceres/parameter_block.h"
+#include "ceres/problem.h"
+#include "ceres/residual_block.h"
+#include "ceres/stl_util.h"
+#include "ceres/triplet_sparse_matrix.h"
+
+namespace ceres {
+namespace internal {
+
+using std::max;
+using std::set;
+using std::string;
+using std::vector;
+
+Program::Program() {}
+
+Program::Program(const Program& program)
+    : parameter_blocks_(program.parameter_blocks_),
+      residual_blocks_(program.residual_blocks_) {
+}
+
+const vector<ParameterBlock*>& Program::parameter_blocks() const {
+  return parameter_blocks_;
+}
+
+const vector<ResidualBlock*>& Program::residual_blocks() const {
+  return residual_blocks_;
+}
+
+vector<ParameterBlock*>* Program::mutable_parameter_blocks() {
+  return &parameter_blocks_;
+}
+
+vector<ResidualBlock*>* Program::mutable_residual_blocks() {
+  return &residual_blocks_;
+}
+
+bool Program::StateVectorToParameterBlocks(const double *state) {
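+  // The state vector is laid out over all parameter blocks in the program, so
+  // the pointer advances by Size() for every block, including constant ones.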
+  for (int i = 0; i < parameter_blocks_.size(); ++i) {
+    if (!parameter_blocks_[i]->IsConstant() &&
+        !parameter_blocks_[i]->SetState(state)) {
+      return false;
+    }
+    state += parameter_blocks_[i]->Size();
+  }
+  return true;
+}
+
+void Program::ParameterBlocksToStateVector(double *state) const {
+  for (int i = 0; i < parameter_blocks_.size(); ++i) {
+    parameter_blocks_[i]->GetState(state);
+    state += parameter_blocks_[i]->Size();
+  }
+}
+
+void Program::CopyParameterBlockStateToUserState() {
+  for (int i = 0; i < parameter_blocks_.size(); ++i) {
+    parameter_blocks_[i]->GetState(parameter_blocks_[i]->mutable_user_state());
+  }
+}
+
+bool Program::SetParameterBlockStatePtrsToUserStatePtrs() {
+  for (int i = 0; i < parameter_blocks_.size(); ++i) {
+    if (!parameter_blocks_[i]->IsConstant() &&
+        !parameter_blocks_[i]->SetState(parameter_blocks_[i]->user_state())) {
+      return false;
+    }
+  }
+  return true;
+}
+
+bool Program::Plus(const double* state,
+                   const double* delta,
+                   double* state_plus_delta) const {
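+  // state and state_plus_delta advance by Size() per parameter block, while
+  // delta advances by LocalSize(), since the increment lives in the tangent
+  // space of the local parameterization.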
+  for (int i = 0; i < parameter_blocks_.size(); ++i) {
+    if (!parameter_blocks_[i]->Plus(state, delta, state_plus_delta)) {
+      return false;
+    }
+    state += parameter_blocks_[i]->Size();
+    delta += parameter_blocks_[i]->LocalSize();
+    state_plus_delta += parameter_blocks_[i]->Size();
+  }
+  return true;
+}
+
+void Program::SetParameterOffsetsAndIndex() {
+  // Mark every parameter that appears as an argument to a residual as not
+  // belonging to the program by setting its index to -1.
+  for (int i = 0; i < residual_blocks_.size(); ++i) {
+    ResidualBlock* residual_block = residual_blocks_[i];
+    for (int j = 0; j < residual_block->NumParameterBlocks(); ++j) {
+      residual_block->parameter_blocks()[j]->set_index(-1);
+    }
+  }
+  // For parameters that appear in the program, set their position and offset.
+  int state_offset = 0;
+  int delta_offset = 0;
+  for (int i = 0; i < parameter_blocks_.size(); ++i) {
+    parameter_blocks_[i]->set_index(i);
+    parameter_blocks_[i]->set_state_offset(state_offset);
+    parameter_blocks_[i]->set_delta_offset(delta_offset);
+    state_offset += parameter_blocks_[i]->Size();
+    delta_offset += parameter_blocks_[i]->LocalSize();
+  }
+}
+
+bool Program::IsValid() const {
+  for (int i = 0; i < residual_blocks_.size(); ++i) {
+    const ResidualBlock* residual_block = residual_blocks_[i];
+    if (residual_block->index() != i) {
+      LOG(WARNING) << "Residual block: " << i
+                   << " has incorrect index: " << residual_block->index();
+      return false;
+    }
+  }
+
+  int state_offset = 0;
+  int delta_offset = 0;
+  for (int i = 0; i < parameter_blocks_.size(); ++i) {
+    const ParameterBlock* parameter_block = parameter_blocks_[i];
+    if (parameter_block->index() != i ||
+        parameter_block->state_offset() != state_offset ||
+        parameter_block->delta_offset() != delta_offset) {
+      LOG(WARNING) << "Parameter block: " << i
+                   << " has incorrect indexing information: "
+                   << parameter_block->ToString();
+      return false;
+    }
+
+    state_offset += parameter_blocks_[i]->Size();
+    delta_offset += parameter_blocks_[i]->LocalSize();
+  }
+
+  return true;
+}
+
+bool Program::ParameterBlocksAreFinite(string* message) const {
+  CHECK(message != nullptr);
+  for (int i = 0; i < parameter_blocks_.size(); ++i) {
+    const ParameterBlock* parameter_block = parameter_blocks_[i];
+    const double* array = parameter_block->user_state();
+    const int size = parameter_block->Size();
+    const int invalid_index = FindInvalidValue(size, array);
+    if (invalid_index != size) {
+      *message = StringPrintf(
+          "ParameterBlock: %p with size %d has at least one invalid value.\n"
+          "First invalid value is at index: %d.\n"
+          "Parameter block values: ",
+          array, size, invalid_index);
+      AppendArrayToString(size, array, message);
+      return false;
+    }
+  }
+  return true;
+}
+
+bool Program::IsBoundsConstrained() const {
+  for (int i = 0; i < parameter_blocks_.size(); ++i) {
+    const ParameterBlock* parameter_block = parameter_blocks_[i];
+    if (parameter_block->IsConstant()) {
+      continue;
+    }
+    const int size = parameter_block->Size();
+    for (int j = 0; j < size; ++j) {
+      const double lower_bound = parameter_block->LowerBoundForParameter(j);
+      const double upper_bound = parameter_block->UpperBoundForParameter(j);
+      if (lower_bound > -std::numeric_limits<double>::max() ||
+          upper_bound < std::numeric_limits<double>::max()) {
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+bool Program::IsFeasible(string* message) const {
+  CHECK(message != nullptr);
+  for (int i = 0; i < parameter_blocks_.size(); ++i) {
+    const ParameterBlock* parameter_block = parameter_blocks_[i];
+    const double* parameters = parameter_block->user_state();
+    const int size = parameter_block->Size();
+    if (parameter_block->IsConstant()) {
+      // Constant parameter blocks must start in the feasible region
+      // to ultimately produce a feasible solution, since Ceres cannot
+      // change them.
+      for (int j = 0; j < size; ++j) {
+        const double lower_bound = parameter_block->LowerBoundForParameter(j);
+        const double upper_bound = parameter_block->UpperBoundForParameter(j);
+        if (parameters[j] < lower_bound || parameters[j] > upper_bound) {
+          *message = StringPrintf(
+              "ParameterBlock: %p with size %d has at least one infeasible "
+              "value."
+              "\nFirst infeasible value is at index: %d."
+              "\nLower bound: %e, value: %e, upper bound: %e"
+              "\nParameter block values: ",
+              parameters, size, j, lower_bound, parameters[j], upper_bound);
+          AppendArrayToString(size, parameters, message);
+          return false;
+        }
+      }
+    } else {
+      // Variable parameter blocks must have non-empty feasible
+      // regions, otherwise there is no way to produce a feasible
+      // solution.
+      for (int j = 0; j < size; ++j) {
+        const double lower_bound = parameter_block->LowerBoundForParameter(j);
+        const double upper_bound = parameter_block->UpperBoundForParameter(j);
+        if (lower_bound >= upper_bound) {
+          *message = StringPrintf(
+              "ParameterBlock: %p with size %d has at least one infeasible "
+              "bound."
+              "\nFirst infeasible bound is at index: %d."
+              "\nLower bound: %e, upper bound: %e"
+              "\nParameter block values: ",
+              parameters, size, j, lower_bound, upper_bound);
+          AppendArrayToString(size, parameters, message);
+          return false;
+        }
+      }
+    }
+  }
+
+  return true;
+}
+
+Program* Program::CreateReducedProgram(
+    vector<double*>* removed_parameter_blocks,
+    double* fixed_cost,
+    string* error) const {
+  CHECK(removed_parameter_blocks != nullptr);
+  CHECK(fixed_cost != nullptr);
+  CHECK(error != nullptr);
+
+  std::unique_ptr<Program> reduced_program(new Program(*this));
+  if (!reduced_program->RemoveFixedBlocks(removed_parameter_blocks,
+                                          fixed_cost,
+                                          error)) {
+    return NULL;
+  }
+
+  reduced_program->SetParameterOffsetsAndIndex();
+  return reduced_program.release();
+}
+
+bool Program::RemoveFixedBlocks(vector<double*>* removed_parameter_blocks,
+                                double* fixed_cost,
+                                string* error) {
+  CHECK(removed_parameter_blocks != nullptr);
+  CHECK(fixed_cost != nullptr);
+  CHECK(error != nullptr);
+
+  std::unique_ptr<double[]> residual_block_evaluate_scratch;
+  residual_block_evaluate_scratch.reset(
+      new double[MaxScratchDoublesNeededForEvaluate()]);
+  *fixed_cost = 0.0;
+
+  // Mark all the parameters as unused. Abuse the index member of the
+  // parameter blocks for the marking.
+  for (int i = 0; i < parameter_blocks_.size(); ++i) {
+    parameter_blocks_[i]->set_index(-1);
+  }
+
+  // Filter out residual that have all-constant parameters, and mark
+  // all the parameter blocks that appear in residuals.
+  int num_active_residual_blocks = 0;
+  for (int i = 0; i < residual_blocks_.size(); ++i) {
+    ResidualBlock* residual_block = residual_blocks_[i];
+    int num_parameter_blocks = residual_block->NumParameterBlocks();
+
+    // Determine if the residual block is fixed, and also mark varying
+    // parameters that appear in the residual block.
+    bool all_constant = true;
+    for (int k = 0; k < num_parameter_blocks; k++) {
+      ParameterBlock* parameter_block = residual_block->parameter_blocks()[k];
+      if (!parameter_block->IsConstant()) {
+        all_constant = false;
+        parameter_block->set_index(1);
+      }
+    }
+
+    if (!all_constant) {
+      residual_blocks_[num_active_residual_blocks++] = residual_block;
+      continue;
+    }
+
+    // The residual is constant and will be removed, so its cost is
+    // added to the variable fixed_cost.
+    double cost = 0.0;
+    if (!residual_block->Evaluate(true,
+                                  &cost,
+                                  NULL,
+                                  NULL,
+                                  residual_block_evaluate_scratch.get())) {
+      *error = StringPrintf("Evaluation of the residual %d failed during "
+                            "removal of fixed residual blocks.", i);
+      return false;
+    }
+    *fixed_cost += cost;
+  }
+  residual_blocks_.resize(num_active_residual_blocks);
+
+  // Filter out unused or fixed parameter blocks.
+  int num_active_parameter_blocks = 0;
+  removed_parameter_blocks->clear();
+  for (int i = 0; i < parameter_blocks_.size(); ++i) {
+    ParameterBlock* parameter_block = parameter_blocks_[i];
+    if (parameter_block->index() == -1) {
+      removed_parameter_blocks->push_back(
+          parameter_block->mutable_user_state());
+    } else {
+      parameter_blocks_[num_active_parameter_blocks++] = parameter_block;
+    }
+  }
+  parameter_blocks_.resize(num_active_parameter_blocks);
+
+  if (!(((NumResidualBlocks() == 0) &&
+         (NumParameterBlocks() == 0)) ||
+        ((NumResidualBlocks() != 0) &&
+         (NumParameterBlocks() != 0)))) {
+    *error =  "Congratulations, you found a bug in Ceres. Please report it.";
+    return false;
+  }
+
+  return true;
+}
+
+bool Program::IsParameterBlockSetIndependent(
+    const set<double*>& independent_set) const {
+  // Loop over each residual block and ensure that no two parameter
+  // blocks in the same residual block are part of
+  // parameter_block_ptrs as that would violate the assumption that it
+  // is an independent set in the Hessian matrix.
+  for (const ResidualBlock* residual_block : residual_blocks_) {
+    ParameterBlock* const* parameter_blocks =
+        residual_block->parameter_blocks();
+    const int num_parameter_blocks = residual_block->NumParameterBlocks();
+    int count = 0;
+    for (int i = 0; i < num_parameter_blocks; ++i) {
+      count += independent_set.count(
+          parameter_blocks[i]->mutable_user_state());
+    }
+    if (count > 1) {
+      return false;
+    }
+  }
+  return true;
+}
+
+TripletSparseMatrix* Program::CreateJacobianBlockSparsityTranspose() const {
+  // Matrix to store the block sparsity structure of the Jacobian.
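+  // Space for ten non-zeros per residual block is reserved up front; the
+  // matrix is grown below if more entries are needed.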
+  TripletSparseMatrix* tsm =
+      new TripletSparseMatrix(NumParameterBlocks(),
+                              NumResidualBlocks(),
+                              10 * NumResidualBlocks());
+  int num_nonzeros = 0;
+  int* rows = tsm->mutable_rows();
+  int* cols = tsm->mutable_cols();
+  double* values = tsm->mutable_values();
+
+  for (int c = 0; c < residual_blocks_.size(); ++c) {
+    const ResidualBlock* residual_block = residual_blocks_[c];
+    const int num_parameter_blocks = residual_block->NumParameterBlocks();
+    ParameterBlock* const* parameter_blocks =
+        residual_block->parameter_blocks();
+
+    for (int j = 0; j < num_parameter_blocks; ++j) {
+      if (parameter_blocks[j]->IsConstant()) {
+        continue;
+      }
+
+      // Re-size the matrix if needed.
+      if (num_nonzeros >= tsm->max_num_nonzeros()) {
+        tsm->set_num_nonzeros(num_nonzeros);
+        tsm->Reserve(2 * num_nonzeros);
+        rows = tsm->mutable_rows();
+        cols = tsm->mutable_cols();
+        values = tsm->mutable_values();
+      }
+
+      const int r = parameter_blocks[j]->index();
+      rows[num_nonzeros] = r;
+      cols[num_nonzeros] = c;
+      values[num_nonzeros] = 1.0;
+      ++num_nonzeros;
+    }
+  }
+
+  tsm->set_num_nonzeros(num_nonzeros);
+  return tsm;
+}
+
+int Program::NumResidualBlocks() const {
+  return residual_blocks_.size();
+}
+
+int Program::NumParameterBlocks() const {
+  return parameter_blocks_.size();
+}
+
+int Program::NumResiduals() const {
+  int num_residuals = 0;
+  for (int i = 0; i < residual_blocks_.size(); ++i) {
+    num_residuals += residual_blocks_[i]->NumResiduals();
+  }
+  return num_residuals;
+}
+
+int Program::NumParameters() const {
+  int num_parameters = 0;
+  for (int i = 0; i < parameter_blocks_.size(); ++i) {
+    num_parameters += parameter_blocks_[i]->Size();
+  }
+  return num_parameters;
+}
+
+int Program::NumEffectiveParameters() const {
+  int num_parameters = 0;
+  for (int i = 0; i < parameter_blocks_.size(); ++i) {
+    num_parameters += parameter_blocks_[i]->LocalSize();
+  }
+  return num_parameters;
+}
+
+int Program::MaxScratchDoublesNeededForEvaluate() const {
+  // Compute the scratch space needed for evaluate.
+  int max_scratch_doubles_for_evaluate = 0;
+  for (int i = 0; i < residual_blocks_.size(); ++i) {
+    max_scratch_doubles_for_evaluate =
+        max(max_scratch_doubles_for_evaluate,
+            residual_blocks_[i]->NumScratchDoublesForEvaluate());
+  }
+  return max_scratch_doubles_for_evaluate;
+}
+
+int Program::MaxDerivativesPerResidualBlock() const {
+  int max_derivatives = 0;
+  for (int i = 0; i < residual_blocks_.size(); ++i) {
+    int derivatives = 0;
+    ResidualBlock* residual_block = residual_blocks_[i];
+    int num_parameters = residual_block->NumParameterBlocks();
+    for (int j = 0; j < num_parameters; ++j) {
+      derivatives += residual_block->NumResiduals() *
+                     residual_block->parameter_blocks()[j]->LocalSize();
+    }
+    max_derivatives = max(max_derivatives, derivatives);
+  }
+  return max_derivatives;
+}
+
+int Program::MaxParametersPerResidualBlock() const {
+  int max_parameters = 0;
+  for (int i = 0; i < residual_blocks_.size(); ++i) {
+    max_parameters = max(max_parameters,
+                         residual_blocks_[i]->NumParameterBlocks());
+  }
+  return max_parameters;
+}
+
+int Program::MaxResidualsPerResidualBlock() const {
+  int max_residuals = 0;
+  for (int i = 0; i < residual_blocks_.size(); ++i) {
+    max_residuals = max(max_residuals, residual_blocks_[i]->NumResiduals());
+  }
+  return max_residuals;
+}
+
+string Program::ToString() const {
+  string ret = "Program dump\n";
+  ret += StringPrintf("Number of parameter blocks: %d\n", NumParameterBlocks());
+  ret += StringPrintf("Number of parameters: %d\n", NumParameters());
+  ret += "Parameters:\n";
+  for (int i = 0; i < parameter_blocks_.size(); ++i) {
+    ret += StringPrintf("%d: %s\n",
+                        i, parameter_blocks_[i]->ToString().c_str());
+  }
+  return ret;
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/program.h b/internal/ceres/program.h
new file mode 100644
index 0000000..38c958f
--- /dev/null
+++ b/internal/ceres/program.h
@@ -0,0 +1,192 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#ifndef CERES_INTERNAL_PROGRAM_H_
+#define CERES_INTERNAL_PROGRAM_H_
+
+#include <set>
+#include <string>
+#include <vector>
+#include "ceres/internal/port.h"
+
+namespace ceres {
+namespace internal {
+
+class ParameterBlock;
+class ProblemImpl;
+class ResidualBlock;
+class TripletSparseMatrix;
+
+// A nonlinear least squares optimization problem. This is different from the
+// similarly-named "Problem" object, which offers a mutation interface for
+// adding and modifying parameters and residuals. The Program contains the core
+// part of the Problem, which is the parameters and the residuals, stored in a
+// particular ordering. The ordering is critical, since it defines the mapping
+// between (residual, parameter) pairs and a position in the jacobian of the
+// objective function. Various parts of Ceres transform one Program into
+// another; for example, the first stage of solving involves stripping all
+// constant parameters and residuals. This is in contrast with Problem, which is
+// not built for transformation.
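+//
+// As a rough sketch of typical usage (the name `problem_impl` is illustrative;
+// see program_test.cc in this change for concrete call sites), a reduced
+// program is obtained from a ProblemImpl as follows:
+//
+//   std::vector<double*> removed_parameter_blocks;
+//   double fixed_cost = 0.0;
+//   std::string error;
+//   std::unique_ptr<Program> reduced_program(
+//       problem_impl.program().CreateReducedProgram(
+//           &removed_parameter_blocks, &fixed_cost, &error));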
+class Program {
+ public:
+  Program();
+  explicit Program(const Program& program);
+
+  // The ordered parameter and residual blocks for the program.
+  const std::vector<ParameterBlock*>& parameter_blocks() const;
+  const std::vector<ResidualBlock*>& residual_blocks() const;
+  std::vector<ParameterBlock*>* mutable_parameter_blocks();
+  std::vector<ResidualBlock*>* mutable_residual_blocks();
+
+  // Serialize to/from the program and update states.
+  //
+  // NOTE: Setting the state of a parameter block can trigger the
+  // computation of the Jacobian of its local parameterization. If
+  // this computation fails for some reason, then this method returns
+  // false and the state of the parameter blocks cannot be trusted.
+  bool StateVectorToParameterBlocks(const double *state);
+  void ParameterBlocksToStateVector(double *state) const;
+
+  // Copy internal state to the user's parameters.
+  void CopyParameterBlockStateToUserState();
+
+  // Set the parameter block pointers to the user pointers. Since this
+  // sets the parameter block states internally, which may invoke local
+  // parameterizations, it can fail. False is returned on failure.
+  bool SetParameterBlockStatePtrsToUserStatePtrs();
+
+  // Update a state vector for the program given a delta.
+  bool Plus(const double* state,
+            const double* delta,
+            double* state_plus_delta) const;
+
+  // Set the parameter indices and offsets. This permits mapping backward
+  // from a ParameterBlock* to an index in the parameter_blocks() vector. For
+  // any parameter block p, after calling SetParameterOffsetsAndIndex(), it
+  // is true that
+  //
+  //   parameter_blocks()[p->index()] == p
+  //
+  // If a parameter block appears in a residual but not in parameter_blocks(),
+  // then it will have an index of -1.
+  //
+  // This also updates p->state_offset() and p->delta_offset(), which are the
+  // position of the parameter in the state and delta vector respectively.
+  void SetParameterOffsetsAndIndex();
+
+  // Check if the internal state of the program (the indexing and the
+  // offsets) are correct.
+  bool IsValid() const;
+
+  bool ParameterBlocksAreFinite(std::string* message) const;
+
+  // Returns true if the program has any non-constant parameter blocks
+  // which have non-trivial bounds constraints.
+  bool IsBoundsConstrained() const;
+
+  // Returns false if the program has any constant parameter blocks
+  // which are not feasible, or any variable parameter blocks which
+  // have a lower bound greater than or equal to the upper bound.
+  bool IsFeasible(std::string* message) const;
+
+  // Loop over each residual block and ensure that no two parameter
+  // blocks in the same residual block are part of
+  // parameter_blocks as that would violate the assumption that it
+  // is an independent set in the Hessian matrix.
+  bool IsParameterBlockSetIndependent(
+      const std::set<double*>& independent_set) const;
+
+  // Create a TripletSparseMatrix which contains the zero-one
+  // structure corresponding to the block sparsity of the transpose of
+  // the Jacobian matrix.
+  //
+  // Caller owns the result.
+  TripletSparseMatrix* CreateJacobianBlockSparsityTranspose() const;
+
+  // Create a copy of this program, removing constant parameter
+  // blocks and residual blocks with no varying parameter blocks, while
+  // preserving their relative order.
+  //
+  // removed_parameter_blocks on exit will contain the list of
+  // parameter blocks that were removed.
+  //
+  // fixed_cost will be equal to the sum of the costs of the residual
+  // blocks that were removed.
+  //
+  // If there was a problem, then the function will return a NULL
+  // pointer and error will contain a human readable description of
+  // the problem.
+  Program* CreateReducedProgram(std::vector<double*>* removed_parameter_blocks,
+                                double* fixed_cost,
+                                std::string* error) const;
+
+  // See problem.h for what these do.
+  int NumParameterBlocks() const;
+  int NumParameters() const;
+  int NumEffectiveParameters() const;
+  int NumResidualBlocks() const;
+  int NumResiduals() const;
+
+  int MaxScratchDoublesNeededForEvaluate() const;
+  int MaxDerivativesPerResidualBlock() const;
+  int MaxParametersPerResidualBlock() const;
+  int MaxResidualsPerResidualBlock() const;
+
+  // A human-readable dump of the parameter blocks for debugging.
+  // TODO(keir): If necessary, also dump the residual blocks.
+  std::string ToString() const;
+
+ private:
+  // Remove constant parameter blocks and residual blocks with no
+  // varying parameter blocks while preserving their relative order.
+  //
+  // removed_parameter_blocks on exit will contain the list of
+  // parameter blocks that were removed.
+  //
+  // fixed_cost will be equal to the sum of the costs of the residual
+  // blocks that were removed.
+  //
+  // If there was a problem, then the function will return false and
+  // error will contain a human readable description of the problem.
+  bool RemoveFixedBlocks(std::vector<double*>* removed_parameter_blocks,
+                         double* fixed_cost,
+                         std::string* message);
+
+  // The Program does not own the ParameterBlock or ResidualBlock objects.
+  std::vector<ParameterBlock*> parameter_blocks_;
+  std::vector<ResidualBlock*> residual_blocks_;
+
+  friend class ProblemImpl;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_PROGRAM_H_
diff --git a/internal/ceres/program_evaluator.h b/internal/ceres/program_evaluator.h
new file mode 100644
index 0000000..6781eb7
--- /dev/null
+++ b/internal/ceres/program_evaluator.h
@@ -0,0 +1,381 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// The ProgramEvaluator runs the cost functions contained in each residual block
+// and stores the result into a jacobian. The particular type of jacobian is
+// abstracted out using two template parameters:
+//
+//   - An "EvaluatePreparer" that is responsible for creating the array with
+//     pointers to the jacobian blocks where the cost function evaluates to.
+//   - A "JacobianWriter" that is responsible for storing the resulting
+//     jacobian blocks in the passed sparse matrix.
+//
+// This abstraction affords an efficient evaluator implementation while still
+// supporting writing to multiple sparse matrix formats. For example, when the
+// ProgramEvaluator is parameterized for writing to block sparse matrices, the
+// residual jacobians are written directly into their final position in the
+// block sparse matrix by the user's CostFunction; there is no copying.
+//
+// The evaluation is threaded with OpenMP or C++11 threads.
+//
+// The EvaluatePreparer and JacobianWriter interfaces are as follows:
+//
+//   class EvaluatePreparer {
+//     // Prepare the jacobians array for use as the destination of a call to
+//     // a cost function's evaluate method.
+//     void Prepare(const ResidualBlock* residual_block,
+//                  int residual_block_index,
+//                  SparseMatrix* jacobian,
+//                  double** jacobians);
+//   }
+//
+//   class JacobianWriter {
+//     // Create a jacobian that this writer can write. Same as
+//     // Evaluator::CreateJacobian.
+//     SparseMatrix* CreateJacobian() const;
+//
+//     // Create num_threads evaluate preparers. Caller owns result which must
+//     // be freed with delete[]. Resulting preparers are valid while *this is.
+//     EvaluatePreparer* CreateEvaluatePreparers(int num_threads);
+//
+//     // Write the block jacobians from a residual block evaluation to the
+//     // larger sparse jacobian.
+//     void Write(int residual_id,
+//                int residual_offset,
+//                double** jacobians,
+//                SparseMatrix* jacobian);
+//   }
+//
+// Note: The ProgramEvaluator is not thread safe, since internally it maintains
+// some per-thread scratch space.
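+//
+// As an illustrative sketch (the concrete preparer/writer combinations are
+// chosen elsewhere, e.g. by Evaluator::Create; ScratchEvaluatePreparer and
+// CompressedRowJacobianWriter are assumed to be the types of those names
+// declared in other Ceres internal headers), an evaluator producing compressed
+// row sparse jacobians could be instantiated as:
+//
+//   ProgramEvaluator<ScratchEvaluatePreparer, CompressedRowJacobianWriter>
+//       evaluator(options, program);  // Evaluator::Options and Program*.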
+
+#ifndef CERES_INTERNAL_PROGRAM_EVALUATOR_H_
+#define CERES_INTERNAL_PROGRAM_EVALUATOR_H_
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#include <atomic>
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "ceres/evaluation_callback.h"
+#include "ceres/execution_summary.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/parallel_for.h"
+#include "ceres/parameter_block.h"
+#include "ceres/program.h"
+#include "ceres/residual_block.h"
+#include "ceres/small_blas.h"
+
+namespace ceres {
+namespace internal {
+
+struct NullJacobianFinalizer {
+  void operator()(SparseMatrix* jacobian, int num_parameters) {}
+};
+
+template<typename EvaluatePreparer,
+         typename JacobianWriter,
+         typename JacobianFinalizer = NullJacobianFinalizer>
+class ProgramEvaluator : public Evaluator {
+ public:
+  ProgramEvaluator(const Evaluator::Options &options, Program* program)
+      : options_(options),
+        program_(program),
+        jacobian_writer_(options, program),
+        evaluate_preparers_(
+            jacobian_writer_.CreateEvaluatePreparers(options.num_threads)) {
+#ifdef CERES_NO_THREADS
+    if (options_.num_threads > 1) {
+      LOG(WARNING)
+          << "No threading support is compiled into this binary; "
+          << "only options.num_threads = 1 is supported. Switching "
+          << "to single threaded mode.";
+      options_.num_threads = 1;
+    }
+#endif // CERES_NO_THREADS
+
+    BuildResidualLayout(*program, &residual_layout_);
+    evaluate_scratch_.reset(CreateEvaluatorScratch(*program,
+                                                   options.num_threads));
+  }
+
+  // Implementation of Evaluator interface.
+  SparseMatrix* CreateJacobian() const {
+    return jacobian_writer_.CreateJacobian();
+  }
+
+  bool Evaluate(const Evaluator::EvaluateOptions& evaluate_options,
+                const double* state,
+                double* cost,
+                double* residuals,
+                double* gradient,
+                SparseMatrix* jacobian) {
+    ScopedExecutionTimer total_timer("Evaluator::Total", &execution_summary_);
+    ScopedExecutionTimer call_type_timer(gradient == NULL && jacobian == NULL
+                                         ? "Evaluator::Residual"
+                                         : "Evaluator::Jacobian",
+                                         &execution_summary_);
+
+    // The parameters are stateful, so set the state before evaluating.
+    if (!program_->StateVectorToParameterBlocks(state)) {
+      return false;
+    }
+
+    // Notify the user about a new evaluation point if they are interested.
+    if (options_.evaluation_callback != NULL) {
+      program_->CopyParameterBlockStateToUserState();
+      options_.evaluation_callback->PrepareForEvaluation(
+          /*jacobians=*/(gradient != NULL || jacobian != NULL),
+          evaluate_options.new_evaluation_point);
+    }
+
+    if (residuals != NULL) {
+      VectorRef(residuals, program_->NumResiduals()).setZero();
+    }
+
+    if (jacobian != NULL) {
+      jacobian->SetZero();
+    }
+
+    // Each thread gets its own cost and evaluate scratch space.
+    for (int i = 0; i < options_.num_threads; ++i) {
+      evaluate_scratch_[i].cost = 0.0;
+      if (gradient != NULL) {
+        VectorRef(evaluate_scratch_[i].gradient.get(),
+                  program_->NumEffectiveParameters()).setZero();
+      }
+    }
+
+    const int num_residual_blocks = program_->NumResidualBlocks();
+    // This bool is used to disable the loop if an error is encountered without
+    // breaking out of it. The remaining loop iterations are still run, but with
+    // an empty body, and so will finish quickly.
+    std::atomic_bool abort(false);
+    ParallelFor(
+        options_.context,
+        0,
+        num_residual_blocks,
+        options_.num_threads,
+        [&](int thread_id, int i) {
+          if (abort) {
+            return;
+          }
+
+          EvaluatePreparer* preparer = &evaluate_preparers_[thread_id];
+          EvaluateScratch* scratch = &evaluate_scratch_[thread_id];
+
+          // Prepare block residuals if requested.
+          const ResidualBlock* residual_block = program_->residual_blocks()[i];
+          double* block_residuals = NULL;
+          if (residuals != NULL) {
+            block_residuals = residuals + residual_layout_[i];
+          } else if (gradient != NULL) {
+            block_residuals = scratch->residual_block_residuals.get();
+          }
+
+          // Prepare block jacobians if requested.
+          double** block_jacobians = NULL;
+          if (jacobian != NULL || gradient != NULL) {
+            preparer->Prepare(residual_block,
+                              i,
+                              jacobian,
+                              scratch->jacobian_block_ptrs.get());
+            block_jacobians = scratch->jacobian_block_ptrs.get();
+          }
+
+          // Evaluate the cost, residuals, and jacobians.
+          double block_cost;
+          if (!residual_block->Evaluate(
+                  evaluate_options.apply_loss_function,
+                  &block_cost,
+                  block_residuals,
+                  block_jacobians,
+                  scratch->residual_block_evaluate_scratch.get())) {
+            abort = true;
+            return;
+          }
+
+          scratch->cost += block_cost;
+
+          // Store the jacobians, if they were requested.
+          if (jacobian != NULL) {
+            jacobian_writer_.Write(i,
+                                   residual_layout_[i],
+                                   block_jacobians,
+                                   jacobian);
+          }
+
+          // Compute and store the gradient, if it was requested.
+          if (gradient != NULL) {
+            int num_residuals = residual_block->NumResiduals();
+            int num_parameter_blocks = residual_block->NumParameterBlocks();
+            for (int j = 0; j < num_parameter_blocks; ++j) {
+              const ParameterBlock* parameter_block =
+                  residual_block->parameter_blocks()[j];
+              if (parameter_block->IsConstant()) {
+                continue;
+              }
+
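+              // Accumulate this block's contribution g_j += J_j^T * r into
+              // the thread-local gradient at the parameter block's delta
+              // offset.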
+              MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
+                  block_jacobians[j],
+                  num_residuals,
+                  parameter_block->LocalSize(),
+                  block_residuals,
+                  scratch->gradient.get() + parameter_block->delta_offset());
+            }
+          }
+        });
+
+    if (!abort) {
+      const int num_parameters = program_->NumEffectiveParameters();
+
+      // Sum the cost and gradient (if requested) from each thread.
+      (*cost) = 0.0;
+      if (gradient != NULL) {
+        VectorRef(gradient, num_parameters).setZero();
+      }
+      for (int i = 0; i < options_.num_threads; ++i) {
+        (*cost) += evaluate_scratch_[i].cost;
+        if (gradient != NULL) {
+          VectorRef(gradient, num_parameters) +=
+              VectorRef(evaluate_scratch_[i].gradient.get(), num_parameters);
+        }
+      }
+
+      // Finalize the Jacobian if it is available.
+      // `num_parameters` is passed to the finalizer so that additional
+      // storage can be reserved for additional diagonal elements if
+      // necessary.
+      if (jacobian != NULL) {
+        JacobianFinalizer f;
+        f(jacobian, num_parameters);
+      }
+    }
+    return !abort;
+  }
+
+  bool Plus(const double* state,
+            const double* delta,
+            double* state_plus_delta) const {
+    return program_->Plus(state, delta, state_plus_delta);
+  }
+
+  int NumParameters() const {
+    return program_->NumParameters();
+  }
+  int NumEffectiveParameters() const {
+    return program_->NumEffectiveParameters();
+  }
+
+  int NumResiduals() const {
+    return program_->NumResiduals();
+  }
+
+  virtual std::map<std::string, CallStatistics> Statistics() const {
+    return execution_summary_.statistics();
+  }
+
+ private:
+  // Per-thread scratch space needed to evaluate and store each residual block.
+  struct EvaluateScratch {
+    void Init(int max_parameters_per_residual_block,
+              int max_scratch_doubles_needed_for_evaluate,
+              int max_residuals_per_residual_block,
+              int num_parameters) {
+      residual_block_evaluate_scratch.reset(
+          new double[max_scratch_doubles_needed_for_evaluate]);
+      gradient.reset(new double[num_parameters]);
+      VectorRef(gradient.get(), num_parameters).setZero();
+      residual_block_residuals.reset(
+          new double[max_residuals_per_residual_block]);
+      jacobian_block_ptrs.reset(
+          new double*[max_parameters_per_residual_block]);
+    }
+
+    double cost;
+    std::unique_ptr<double[]> residual_block_evaluate_scratch;
+    // The gradient in the local parameterization.
+    std::unique_ptr<double[]> gradient;
+    // Enough space to store the residual for the largest residual block.
+    std::unique_ptr<double[]> residual_block_residuals;
+    std::unique_ptr<double*[]> jacobian_block_ptrs;
+  };
+
+  static void BuildResidualLayout(const Program& program,
+                                  std::vector<int>* residual_layout) {
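+    // residual_layout[i] is the offset of residual block i in the stacked
+    // residual vector, i.e. the running sum of the preceding blocks' sizes.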
+    const std::vector<ResidualBlock*>& residual_blocks =
+        program.residual_blocks();
+    residual_layout->resize(program.NumResidualBlocks());
+    int residual_pos = 0;
+    for (int i = 0; i < residual_blocks.size(); ++i) {
+      const int num_residuals = residual_blocks[i]->NumResiduals();
+      (*residual_layout)[i] = residual_pos;
+      residual_pos += num_residuals;
+    }
+  }
+
+  // Create scratch space for each thread evaluating the program.
+  static EvaluateScratch* CreateEvaluatorScratch(const Program& program,
+                                                 int num_threads) {
+    int max_parameters_per_residual_block =
+        program.MaxParametersPerResidualBlock();
+    int max_scratch_doubles_needed_for_evaluate =
+        program.MaxScratchDoublesNeededForEvaluate();
+    int max_residuals_per_residual_block =
+        program.MaxResidualsPerResidualBlock();
+    int num_parameters = program.NumEffectiveParameters();
+
+    EvaluateScratch* evaluate_scratch = new EvaluateScratch[num_threads];
+    for (int i = 0; i < num_threads; i++) {
+      evaluate_scratch[i].Init(max_parameters_per_residual_block,
+                               max_scratch_doubles_needed_for_evaluate,
+                               max_residuals_per_residual_block,
+                               num_parameters);
+    }
+    return evaluate_scratch;
+  }
+
+  Evaluator::Options options_;
+  Program* program_;
+  JacobianWriter jacobian_writer_;
+  std::unique_ptr<EvaluatePreparer[]> evaluate_preparers_;
+  std::unique_ptr<EvaluateScratch[]> evaluate_scratch_;
+  std::vector<int> residual_layout_;
+  ::ceres::internal::ExecutionSummary execution_summary_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_PROGRAM_EVALUATOR_H_
diff --git a/internal/ceres/program_test.cc b/internal/ceres/program_test.cc
new file mode 100644
index 0000000..6cb8e9e
--- /dev/null
+++ b/internal/ceres/program_test.cc
@@ -0,0 +1,422 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/program.h"
+
+#include <cmath>
+#include <limits>
+#include <memory>
+#include <vector>
+
+#include "ceres/internal/integer_sequence_algorithm.h"
+#include "ceres/problem_impl.h"
+#include "ceres/residual_block.h"
+#include "ceres/sized_cost_function.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+using std::string;
+using std::vector;
+
+// A cost function that simply returns its argument.
+class UnaryIdentityCostFunction : public SizedCostFunction<1, 1> {
+ public:
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** jacobians) const {
+    residuals[0] = parameters[0][0];
+    if (jacobians != nullptr && jacobians[0] != nullptr) {
+      jacobians[0][0] = 1.0;
+    }
+    return true;
+  }
+};
+
+// Templated base class for the CostFunction signatures.
+template <int kNumResiduals, int... Ns>
+class MockCostFunctionBase : public SizedCostFunction<kNumResiduals, Ns...> {
+ public:
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** jacobians) const {
+    const int kNumParameters = Sum<integer_sequence<int, Ns...>>::Value;
+
+    for (int i = 0; i < kNumResiduals; ++i) {
+      residuals[i] = kNumResiduals + kNumParameters;
+    }
+    return true;
+  }
+};
+
+class UnaryCostFunction : public MockCostFunctionBase<2, 1> {};
+class BinaryCostFunction : public MockCostFunctionBase<2, 1, 1> {};
+class TernaryCostFunction : public MockCostFunctionBase<2, 1, 1, 1> {};
+
+TEST(Program, RemoveFixedBlocksNothingConstant) {
+  ProblemImpl problem;
+  double x;
+  double y;
+  double z;
+
+  problem.AddParameterBlock(&x, 1);
+  problem.AddParameterBlock(&y, 1);
+  problem.AddParameterBlock(&z, 1);
+  problem.AddResidualBlock(new UnaryCostFunction(), nullptr, &x);
+  problem.AddResidualBlock(new BinaryCostFunction(), nullptr, &x, &y);
+  problem.AddResidualBlock(new TernaryCostFunction(), nullptr, &x, &y, &z);
+
+  vector<double*> removed_parameter_blocks;
+  double fixed_cost = 0.0;
+  string message;
+  std::unique_ptr<Program> reduced_program(
+      problem.program().CreateReducedProgram(
+          &removed_parameter_blocks, &fixed_cost, &message));
+
+  EXPECT_EQ(reduced_program->NumParameterBlocks(), 3);
+  EXPECT_EQ(reduced_program->NumResidualBlocks(), 3);
+  EXPECT_EQ(removed_parameter_blocks.size(), 0);
+  EXPECT_EQ(fixed_cost, 0.0);
+}
+
+TEST(Program, RemoveFixedBlocksAllParameterBlocksConstant) {
+  ProblemImpl problem;
+  double x = 1.0;
+
+  problem.AddParameterBlock(&x, 1);
+  problem.AddResidualBlock(new UnaryCostFunction(), nullptr, &x);
+  problem.SetParameterBlockConstant(&x);
+
+  vector<double*> removed_parameter_blocks;
+  double fixed_cost = 0.0;
+  string message;
+  std::unique_ptr<Program> reduced_program(
+      problem.program().CreateReducedProgram(
+          &removed_parameter_blocks, &fixed_cost, &message));
+
+  EXPECT_EQ(reduced_program->NumParameterBlocks(), 0);
+  EXPECT_EQ(reduced_program->NumResidualBlocks(), 0);
+  EXPECT_EQ(removed_parameter_blocks.size(), 1);
+  EXPECT_EQ(removed_parameter_blocks[0], &x);
+  EXPECT_EQ(fixed_cost, 9.0);
+}
+
+TEST(Program, RemoveFixedBlocksNoResidualBlocks) {
+  ProblemImpl problem;
+  double x;
+  double y;
+  double z;
+
+  problem.AddParameterBlock(&x, 1);
+  problem.AddParameterBlock(&y, 1);
+  problem.AddParameterBlock(&z, 1);
+
+  vector<double*> removed_parameter_blocks;
+  double fixed_cost = 0.0;
+  string message;
+  std::unique_ptr<Program> reduced_program(
+      problem.program().CreateReducedProgram(
+          &removed_parameter_blocks, &fixed_cost, &message));
+  EXPECT_EQ(reduced_program->NumParameterBlocks(), 0);
+  EXPECT_EQ(reduced_program->NumResidualBlocks(), 0);
+  EXPECT_EQ(removed_parameter_blocks.size(), 3);
+  EXPECT_EQ(fixed_cost, 0.0);
+}
+
+TEST(Program, RemoveFixedBlocksOneParameterBlockConstant) {
+  ProblemImpl problem;
+  double x;
+  double y;
+  double z;
+
+  problem.AddParameterBlock(&x, 1);
+  problem.AddParameterBlock(&y, 1);
+  problem.AddParameterBlock(&z, 1);
+
+  problem.AddResidualBlock(new UnaryCostFunction(), nullptr, &x);
+  problem.AddResidualBlock(new BinaryCostFunction(), nullptr, &x, &y);
+  problem.SetParameterBlockConstant(&x);
+
+  vector<double*> removed_parameter_blocks;
+  double fixed_cost = 0.0;
+  string message;
+  std::unique_ptr<Program> reduced_program(
+      problem.program().CreateReducedProgram(
+          &removed_parameter_blocks, &fixed_cost, &message));
+  EXPECT_EQ(reduced_program->NumParameterBlocks(), 1);
+  EXPECT_EQ(reduced_program->NumResidualBlocks(), 1);
+}
+
+TEST(Program, RemoveFixedBlocksNumEliminateBlocks) {
+  ProblemImpl problem;
+  double x;
+  double y;
+  double z;
+
+  problem.AddParameterBlock(&x, 1);
+  problem.AddParameterBlock(&y, 1);
+  problem.AddParameterBlock(&z, 1);
+  problem.AddResidualBlock(new UnaryCostFunction(), nullptr, &x);
+  problem.AddResidualBlock(new TernaryCostFunction(), nullptr, &x, &y, &z);
+  problem.AddResidualBlock(new BinaryCostFunction(), nullptr, &x, &y);
+  problem.SetParameterBlockConstant(&x);
+
+  vector<double*> removed_parameter_blocks;
+  double fixed_cost = 0.0;
+  string message;
+  std::unique_ptr<Program> reduced_program(
+      problem.program().CreateReducedProgram(
+          &removed_parameter_blocks, &fixed_cost, &message));
+  EXPECT_EQ(reduced_program->NumParameterBlocks(), 2);
+  EXPECT_EQ(reduced_program->NumResidualBlocks(), 2);
+}
+
+TEST(Program, RemoveFixedBlocksFixedCost) {
+  ProblemImpl problem;
+  double x = 1.23;
+  double y = 4.56;
+  double z = 7.89;
+
+  problem.AddParameterBlock(&x, 1);
+  problem.AddParameterBlock(&y, 1);
+  problem.AddParameterBlock(&z, 1);
+  problem.AddResidualBlock(new UnaryIdentityCostFunction(), nullptr, &x);
+  problem.AddResidualBlock(new TernaryCostFunction(), nullptr, &x, &y, &z);
+  problem.AddResidualBlock(new BinaryCostFunction(), nullptr, &x, &y);
+  problem.SetParameterBlockConstant(&x);
+
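+  // The residual of UnaryIdentityCostFunction is just x itself, so the block
+  // being removed contributes a cost of 0.5 * 1.23^2. Rather than hard-coding
+  // that number, evaluate the block directly and use the result below.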
+  ResidualBlock* expected_removed_block =
+      problem.program().residual_blocks()[0];
+  std::unique_ptr<double[]> scratch(
+      new double[expected_removed_block->NumScratchDoublesForEvaluate()]);
+  double expected_fixed_cost;
+  expected_removed_block->Evaluate(true,
+                                   &expected_fixed_cost,
+                                   nullptr,
+                                   nullptr,
+                                   scratch.get());
+
+  vector<double*> removed_parameter_blocks;
+  double fixed_cost = 0.0;
+  string message;
+  std::unique_ptr<Program> reduced_program(
+      problem.program().CreateReducedProgram(
+          &removed_parameter_blocks, &fixed_cost, &message));
+
+  EXPECT_EQ(reduced_program->NumParameterBlocks(), 2);
+  EXPECT_EQ(reduced_program->NumResidualBlocks(), 2);
+  EXPECT_DOUBLE_EQ(fixed_cost, expected_fixed_cost);
+}
+
+TEST(Program, CreateJacobianBlockSparsityTranspose) {
+  ProblemImpl problem;
+  double x[2];
+  double y[3];
+  double z;
+
+  problem.AddParameterBlock(x, 2);
+  problem.AddParameterBlock(y, 3);
+  problem.AddParameterBlock(&z, 1);
+
+  problem.AddResidualBlock(new MockCostFunctionBase<2, 2>(), nullptr, x);
+  problem.AddResidualBlock(new MockCostFunctionBase<3, 1, 2>(), nullptr, &z, x);
+  problem.AddResidualBlock(new MockCostFunctionBase<4, 1, 3>(), nullptr, &z, y);
+  problem.AddResidualBlock(new MockCostFunctionBase<5, 1, 3>(), nullptr, &z, y);
+  problem.AddResidualBlock(new MockCostFunctionBase<1, 2, 1>(), nullptr, x, &z);
+  problem.AddResidualBlock(new MockCostFunctionBase<2, 1, 3>(), nullptr, &z, y);
+  problem.AddResidualBlock(new MockCostFunctionBase<2, 2, 1>(), nullptr, x, &z);
+  problem.AddResidualBlock(new MockCostFunctionBase<1, 3>(), nullptr, y);
+
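+  // Expected block sparsity of the transposed Jacobian: one row per parameter
+  // block (x = 0, y = 1, z = 2) and one column per residual block, in the
+  // order the residual blocks were added. For example, column 1 corresponds to
+  // the residual block over (&z, x) and so has entries at rows 2 and 0.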
+  TripletSparseMatrix expected_block_sparse_jacobian(3, 8, 14);
+  {
+    int* rows = expected_block_sparse_jacobian.mutable_rows();
+    int* cols = expected_block_sparse_jacobian.mutable_cols();
+    double* values = expected_block_sparse_jacobian.mutable_values();
+    rows[0] = 0;
+    cols[0] = 0;
+
+    rows[1] = 2;
+    cols[1] = 1;
+    rows[2] = 0;
+    cols[2] = 1;
+
+    rows[3] = 2;
+    cols[3] = 2;
+    rows[4] = 1;
+    cols[4] = 2;
+
+    rows[5] = 2;
+    cols[5] = 3;
+    rows[6] = 1;
+    cols[6] = 3;
+
+    rows[7] = 0;
+    cols[7] = 4;
+    rows[8] = 2;
+    cols[8] = 4;
+
+    rows[9] = 2;
+    cols[9] = 5;
+    rows[10] = 1;
+    cols[10] = 5;
+
+    rows[11] = 0;
+    cols[11] = 6;
+    rows[12] = 2;
+    cols[12] = 6;
+
+    rows[13] = 1;
+    cols[13] = 7;
+    std::fill(values, values + 14, 1.0);
+    expected_block_sparse_jacobian.set_num_nonzeros(14);
+  }
+
+  Program* program = problem.mutable_program();
+  program->SetParameterOffsetsAndIndex();
+
+  std::unique_ptr<TripletSparseMatrix> actual_block_sparse_jacobian(
+      program->CreateJacobianBlockSparsityTranspose());
+
+  Matrix expected_dense_jacobian;
+  expected_block_sparse_jacobian.ToDenseMatrix(&expected_dense_jacobian);
+
+  Matrix actual_dense_jacobian;
+  actual_block_sparse_jacobian->ToDenseMatrix(&actual_dense_jacobian);
+  EXPECT_EQ((expected_dense_jacobian - actual_dense_jacobian).norm(), 0.0);
+}
+
+template <int kNumResiduals, int kNumParameterBlocks>
+class NumParameterBlocksCostFunction : public CostFunction {
+ public:
+  NumParameterBlocksCostFunction() {
+    set_num_residuals(kNumResiduals);
+    for (int i = 0; i < kNumParameterBlocks; ++i) {
+      mutable_parameter_block_sizes()->push_back(1);
+    }
+  }
+
+  virtual ~NumParameterBlocksCostFunction() {
+  }
+
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** jacobians) const {
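+    // Deliberately a no-op: the test below only exercises the Jacobian block
+    // sparsity structure and never evaluates residuals or jacobians.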
+    return true;
+  }
+};
+
+TEST(Program, ReallocationInCreateJacobianBlockSparsityTranspose) {
+  // CreateJacobianBlockSparsityTranspose starts with a conservative
+  // estimate of the size of the sparsity pattern. This test ensures
+  // that when those estimates are violated, the reallocation/resizing
+  // logic works correctly.
+
+  ProblemImpl problem;
+  double x[20];
+
+  vector<double*> parameter_blocks;
+  for (int i = 0; i < 20; ++i) {
+    problem.AddParameterBlock(x + i, 1);
+    parameter_blocks.push_back(x + i);
+  }
+
+  problem.AddResidualBlock(new NumParameterBlocksCostFunction<1, 20>(),
+                           nullptr,
+                           parameter_blocks.data(),
+                           static_cast<int>(parameter_blocks.size()));
+
+  TripletSparseMatrix expected_block_sparse_jacobian(20, 1, 20);
+  {
+    int* rows = expected_block_sparse_jacobian.mutable_rows();
+    int* cols = expected_block_sparse_jacobian.mutable_cols();
+    for (int i = 0; i < 20; ++i) {
+      rows[i] = i;
+      cols[i] = 0;
+    }
+
+    double* values = expected_block_sparse_jacobian.mutable_values();
+    std::fill(values, values + 20, 1.0);
+    expected_block_sparse_jacobian.set_num_nonzeros(20);
+  }
+
+  Program* program = problem.mutable_program();
+  program->SetParameterOffsetsAndIndex();
+
+  std::unique_ptr<TripletSparseMatrix> actual_block_sparse_jacobian(
+      program->CreateJacobianBlockSparsityTranspose());
+
+  Matrix expected_dense_jacobian;
+  expected_block_sparse_jacobian.ToDenseMatrix(&expected_dense_jacobian);
+
+  Matrix actual_dense_jacobian;
+  actual_block_sparse_jacobian->ToDenseMatrix(&actual_dense_jacobian);
+  EXPECT_EQ((expected_dense_jacobian - actual_dense_jacobian).norm(), 0.0);
+}
+
+TEST(Program, ProblemHasNanParameterBlocks) {
+  ProblemImpl problem;
+  double x[2];
+  x[0] = 1.0;
+  x[1] = std::numeric_limits<double>::quiet_NaN();
+  problem.AddResidualBlock(new MockCostFunctionBase<1, 2>(), nullptr, x);
+  string error;
+  EXPECT_FALSE(problem.program().ParameterBlocksAreFinite(&error));
+  EXPECT_NE(error.find("has at least one invalid value"),
+            string::npos) << error;
+}
+
+TEST(Program, InfeasibleParameterBlock) {
+  ProblemImpl problem;
+  double x[] = {0.0, 0.0};
+  problem.AddResidualBlock(new MockCostFunctionBase<1, 2>(), nullptr, x);
+  problem.SetParameterLowerBound(x, 0, 2.0);
+  problem.SetParameterUpperBound(x, 0, 1.0);
+  string error;
+  EXPECT_FALSE(problem.program().IsFeasible(&error));
+  EXPECT_NE(error.find("infeasible bound"), string::npos) << error;
+}
+
+TEST(Program, InfeasibleConstantParameterBlock) {
+  ProblemImpl problem;
+  double x[] = {0.0, 0.0};
+  problem.AddResidualBlock(new MockCostFunctionBase<1, 2>(), nullptr, x);
+  problem.SetParameterLowerBound(x, 0, 1.0);
+  problem.SetParameterUpperBound(x, 0, 2.0);
+  problem.SetParameterBlockConstant(x);
+  string error;
+  EXPECT_FALSE(problem.program().IsFeasible(&error));
+  EXPECT_NE(error.find("infeasible value"), string::npos) << error;
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/random.h b/internal/ceres/random.h
new file mode 100644
index 0000000..87d9d77
--- /dev/null
+++ b/internal/ceres/random.h
@@ -0,0 +1,74 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//         sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_RANDOM_H_
+#define CERES_INTERNAL_RANDOM_H_
+
+#include <cmath>
+#include <cstdlib>
+#include "ceres/internal/port.h"
+
+namespace ceres {
+
+inline void SetRandomState(int state) {
+  srand(state);
+}
+
+inline int Uniform(int n) {
+  if (n) {
+    return rand() % n;
+  } else {
+    return 0;
+  }
+}
+
+inline double RandDouble() {
+  double r = static_cast<double>(rand());
+  return r / RAND_MAX;
+}
+
+// Box-Muller algorithm for normal random number generation.
+// http://en.wikipedia.org/wiki/Box-Muller_transform
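+//
+// This is the polar (Marsaglia) form of the transform: points outside the
+// unit disc are rejected, a single standard normal sample is returned, and
+// the second independent sample (x2 * w) is discarded.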
+inline double RandNormal() {
+  double x1, x2, w;
+  do {
+    x1 = 2.0 * RandDouble() - 1.0;
+    x2 = 2.0 * RandDouble() - 1.0;
+    w = x1 * x1 + x2 * x2;
+  } while (w >= 1.0 || w == 0.0);
+
+  w = sqrt((-2.0 * log(w)) / w);
+  return x1 * w;
+}
+
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_RANDOM_H_
diff --git a/internal/ceres/reorder_program.cc b/internal/ceres/reorder_program.cc
new file mode 100644
index 0000000..5a3fbfd
--- /dev/null
+++ b/internal/ceres/reorder_program.cc
@@ -0,0 +1,606 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/reorder_program.h"
+
+#include <algorithm>
+#include <memory>
+#include <numeric>
+#include <vector>
+
+#include "ceres/cxsparse.h"
+#include "ceres/internal/port.h"
+#include "ceres/ordered_groups.h"
+#include "ceres/parameter_block.h"
+#include "ceres/parameter_block_ordering.h"
+#include "ceres/problem_impl.h"
+#include "ceres/program.h"
+#include "ceres/residual_block.h"
+#include "ceres/solver.h"
+#include "ceres/suitesparse.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/types.h"
+#include "Eigen/SparseCore"
+
+#ifdef CERES_USE_EIGEN_SPARSE
+#include "Eigen/OrderingMethods"
+#endif
+
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+using std::map;
+using std::set;
+using std::string;
+using std::vector;
+
+namespace {
+
+// Find the minimum index of any parameter block used by the given residual
+// block. Parameter blocks that have indices greater than
+// size_of_first_elimination_group are considered to have an index
+// equal to size_of_first_elimination_group.
+static int MinParameterBlock(const ResidualBlock* residual_block,
+                             int size_of_first_elimination_group) {
+  int min_parameter_block_position = size_of_first_elimination_group;
+  for (int i = 0; i < residual_block->NumParameterBlocks(); ++i) {
+    ParameterBlock* parameter_block = residual_block->parameter_blocks()[i];
+    if (!parameter_block->IsConstant()) {
+      CHECK_NE(parameter_block->index(), -1)
+          << "Did you forget to call Program::SetParameterOffsetsAndIndex()? "
+          << "This is a Ceres bug; please contact the developers!";
+      min_parameter_block_position = std::min(parameter_block->index(),
+                                              min_parameter_block_position);
+    }
+  }
+  return min_parameter_block_position;
+}
+
+#if EIGEN_VERSION_AT_LEAST(3, 2, 2) && defined(CERES_USE_EIGEN_SPARSE)
+Eigen::SparseMatrix<int> CreateBlockJacobian(
+    const TripletSparseMatrix& block_jacobian_transpose) {
+  typedef Eigen::SparseMatrix<int> SparseMatrix;
+  typedef Eigen::Triplet<int> Triplet;
+
+  const int* rows = block_jacobian_transpose.rows();
+  const int* cols = block_jacobian_transpose.cols();
+  int num_nonzeros = block_jacobian_transpose.num_nonzeros();
+  vector<Triplet> triplets;
+  triplets.reserve(num_nonzeros);
+  for (int i = 0; i < num_nonzeros; ++i) {
+    triplets.push_back(Triplet(cols[i], rows[i], 1));
+  }
+
+  SparseMatrix block_jacobian(block_jacobian_transpose.num_cols(),
+                              block_jacobian_transpose.num_rows());
+  block_jacobian.setFromTriplets(triplets.begin(), triplets.end());
+  return block_jacobian;
+}
+#endif
+
+void OrderingForSparseNormalCholeskyUsingSuiteSparse(
+    const TripletSparseMatrix& tsm_block_jacobian_transpose,
+    const vector<ParameterBlock*>& parameter_blocks,
+    const ParameterBlockOrdering& parameter_block_ordering,
+    int* ordering) {
+#ifdef CERES_NO_SUITESPARSE
+  LOG(FATAL) << "Congratulations, you found a Ceres bug! "
+             << "Please report this error to the developers.";
+#else
+  SuiteSparse ss;
+  cholmod_sparse* block_jacobian_transpose =
+      ss.CreateSparseMatrix(
+          const_cast<TripletSparseMatrix*>(&tsm_block_jacobian_transpose));
+
+  // If CAMD is not available, or the user did not supply a useful ordering,
+  // then just use regular AMD.
+  if (parameter_block_ordering.NumGroups() <= 1 ||
+      !SuiteSparse::IsConstrainedApproximateMinimumDegreeOrderingAvailable()) {
+    ss.ApproximateMinimumDegreeOrdering(block_jacobian_transpose, &ordering[0]);
+  } else {
+    vector<int> constraints;
+    for (int i = 0; i < parameter_blocks.size(); ++i) {
+      constraints.push_back(
+          parameter_block_ordering.GroupId(
+              parameter_blocks[i]->mutable_user_state()));
+    }
+
+    // Renumber the entries of constraints to be contiguous integers
+    // as CAMD requires that the group ids be in the range [0,
+    // parameter_blocks.size() - 1].
+    MapValuesToContiguousRange(constraints.size(), &constraints[0]);
+    ss.ConstrainedApproximateMinimumDegreeOrdering(block_jacobian_transpose,
+                                                   &constraints[0],
+                                                   ordering);
+  }
+
+  VLOG(2) << "Block ordering stats: "
+          << " flops: " << ss.mutable_cc()->fl
+          << " lnz  : " << ss.mutable_cc()->lnz
+          << " anz  : " << ss.mutable_cc()->anz;
+
+  ss.Free(block_jacobian_transpose);
+#endif  // CERES_NO_SUITESPARSE
+}
+
+void OrderingForSparseNormalCholeskyUsingCXSparse(
+    const TripletSparseMatrix& tsm_block_jacobian_transpose,
+    int* ordering) {
+#ifdef CERES_NO_CXSPARSE
+  LOG(FATAL) << "Congratulations, you found a Ceres bug! "
+             << "Please report this error to the developers.";
+#else  // CERES_NO_CXSPARSE
+  // CXSparse works with J'J instead of J'. So compute the block
+  // sparsity for J'J and compute an approximate minimum degree
+  // ordering.
+  CXSparse cxsparse;
+  cs_di* block_jacobian_transpose;
+  block_jacobian_transpose =
+      cxsparse.CreateSparseMatrix(
+            const_cast<TripletSparseMatrix*>(&tsm_block_jacobian_transpose));
+  cs_di* block_jacobian = cxsparse.TransposeMatrix(block_jacobian_transpose);
+  cs_di* block_hessian =
+      cxsparse.MatrixMatrixMultiply(block_jacobian_transpose, block_jacobian);
+  cxsparse.Free(block_jacobian);
+  cxsparse.Free(block_jacobian_transpose);
+
+  cxsparse.ApproximateMinimumDegreeOrdering(block_hessian, ordering);
+  cxsparse.Free(block_hessian);
+#endif  // CERES_NO_CXSPARSE
+}
+
+#if EIGEN_VERSION_AT_LEAST(3, 2, 2)
+void OrderingForSparseNormalCholeskyUsingEigenSparse(
+    const TripletSparseMatrix& tsm_block_jacobian_transpose,
+    int* ordering) {
+#ifndef CERES_USE_EIGEN_SPARSE
+  LOG(FATAL) <<
+      "SPARSE_NORMAL_CHOLESKY cannot be used with EIGEN_SPARSE "
+      "because Ceres was not built with support for "
+      "Eigen's SimplicialLDLT decomposition. "
+      "This requires enabling building with -DEIGENSPARSE=ON.";
+#else
+
+  // This conversion from a TripletSparseMatrix to an Eigen::Triplet
+  // matrix is unfortunate, but unavoidable for now. It is not a
+  // significant performance penalty in the grand scheme of
+  // things. The right thing to do here would be to get a compressed
+  // row sparse matrix representation of the jacobian and go from
+  // there. But that is a project for another day.
+  typedef Eigen::SparseMatrix<int> SparseMatrix;
+
+  const SparseMatrix block_jacobian =
+      CreateBlockJacobian(tsm_block_jacobian_transpose);
+  const SparseMatrix block_hessian =
+      block_jacobian.transpose() * block_jacobian;
+
+  Eigen::AMDOrdering<int> amd_ordering;
+  Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic, int> perm;
+  amd_ordering(block_hessian, perm);
+  for (int i = 0; i < block_hessian.rows(); ++i) {
+    ordering[i] = perm.indices()[i];
+  }
+#endif  // CERES_USE_EIGEN_SPARSE
+}
+#endif
+
+}  // namespace
+
+bool ApplyOrdering(const ProblemImpl::ParameterMap& parameter_map,
+                   const ParameterBlockOrdering& ordering,
+                   Program* program,
+                   string* error) {
+  const int num_parameter_blocks = program->NumParameterBlocks();
+  if (ordering.NumElements() != num_parameter_blocks) {
+    *error = StringPrintf("User specified ordering does not have the same "
+                          "number of parameters as the problem. The problem"
+                          "has %d blocks while the ordering has %d blocks.",
+                          num_parameter_blocks,
+                          ordering.NumElements());
+    return false;
+  }
+
+  vector<ParameterBlock*>* parameter_blocks =
+      program->mutable_parameter_blocks();
+  parameter_blocks->clear();
+
+  const map<int, set<double*>>& groups = ordering.group_to_elements();
+  for (const auto& p : groups) {
+    const set<double*>& group = p.second;
+    for (double* parameter_block_ptr : group) {
+      auto it = parameter_map.find(parameter_block_ptr);
+      if (it == parameter_map.end()) {
+        *error = StringPrintf("User specified ordering contains a pointer "
+                              "to a double that is not a parameter block in "
+                              "the problem. The invalid double is in group: %d",
+                              p.first);
+        return false;
+      }
+      parameter_blocks->push_back(it->second);
+    }
+  }
+  return true;
+}
+
+bool LexicographicallyOrderResidualBlocks(
+    const int size_of_first_elimination_group,
+    Program* program,
+    string* error) {
+  CHECK_GE(size_of_first_elimination_group, 1)
+      << "Congratulations, you found a Ceres bug! Please report this error "
+      << "to the developers.";
+
+  // Create a histogram of the number of residuals for each E block. There is an
+  // extra bucket at the end to catch all non-eliminated F blocks.
+  vector<int> residual_blocks_per_e_block(size_of_first_elimination_group + 1);
+  vector<ResidualBlock*>* residual_blocks = program->mutable_residual_blocks();
+  vector<int> min_position_per_residual(residual_blocks->size());
+  for (int i = 0; i < residual_blocks->size(); ++i) {
+    ResidualBlock* residual_block = (*residual_blocks)[i];
+    int position = MinParameterBlock(residual_block,
+                                     size_of_first_elimination_group);
+    min_position_per_residual[i] = position;
+    DCHECK_LE(position, size_of_first_elimination_group);
+    residual_blocks_per_e_block[position]++;
+  }
+
+  // Run a cumulative sum on the histogram, to obtain offsets to the start of
+  // each histogram bucket (where each bucket is for the residuals for that
+  // E-block).
+  vector<int> offsets(size_of_first_elimination_group + 1);
+  std::partial_sum(residual_blocks_per_e_block.begin(),
+                   residual_blocks_per_e_block.end(),
+                   offsets.begin());
+  CHECK_EQ(offsets.back(), residual_blocks->size())
+      << "Congratulations, you found a Ceres bug! Please report this error "
+      << "to the developers.";
+
+  CHECK(find(residual_blocks_per_e_block.begin(),
+             residual_blocks_per_e_block.end() - 1, 0) !=
+        residual_blocks_per_e_block.end())
+      << "Congratulations, you found a Ceres bug! Please report this error "
+      << "to the developers.";
+
+  // Fill in each bucket with the residual blocks for its corresponding E block.
+  // Each bucket is individually filled from the back of the bucket to the front
+  // of the bucket. The filling order among the buckets is dictated by the
+  // residual blocks. This loop uses the offsets as counters, subtracting one
+  // from each offset as a residual block is placed in the bucket. When the
+  // filling is finished, the offset pointers should have shifted down one
+  // entry (this is verified below).
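+  //
+  // For example, with bucket sizes [2, 3, 1] the cumulative offsets are
+  // [2, 5, 6]; the residual blocks assigned to bucket 1 are then placed at
+  // positions 4, 3 and 2, in the order they are encountered.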
+  vector<ResidualBlock*> reordered_residual_blocks(
+      (*residual_blocks).size(), static_cast<ResidualBlock*>(NULL));
+  for (int i = 0; i < residual_blocks->size(); ++i) {
+    int bucket = min_position_per_residual[i];
+
+    // Decrement the cursor, which should now point at the next empty position.
+    offsets[bucket]--;
+
+    // Sanity.
+    CHECK(reordered_residual_blocks[offsets[bucket]] == NULL)
+        << "Congratulations, you found a Ceres bug! Please report this error "
+        << "to the developers.";
+
+    reordered_residual_blocks[offsets[bucket]] = (*residual_blocks)[i];
+  }
+
+  // Sanity check #1: The difference in bucket offsets should match the
+  // histogram sizes.
+  for (int i = 0; i < size_of_first_elimination_group; ++i) {
+    CHECK_EQ(residual_blocks_per_e_block[i], offsets[i + 1] - offsets[i])
+        << "Congratulations, you found a Ceres bug! Please report this error "
+        << "to the developers.";
+  }
+  // Sanity check #2: No NULL's left behind.
+  for (int i = 0; i < reordered_residual_blocks.size(); ++i) {
+    CHECK(reordered_residual_blocks[i] != NULL)
+        << "Congratulations, you found a Ceres bug! Please report this error "
+        << "to the developers.";
+  }
+
+  // Now that the residuals are collected by E block, swap them in place.
+  swap(*program->mutable_residual_blocks(), reordered_residual_blocks);
+  return true;
+}
+
+// Pre-order the columns corresponding to the schur complement if
+// possible.
+void MaybeReorderSchurComplementColumnsUsingSuiteSparse(
+    const ParameterBlockOrdering& parameter_block_ordering,
+    Program* program) {
+#ifndef CERES_NO_SUITESPARSE
+  SuiteSparse ss;
+  if (!SuiteSparse::IsConstrainedApproximateMinimumDegreeOrderingAvailable()) {
+    return;
+  }
+
+  vector<int> constraints;
+  vector<ParameterBlock*>& parameter_blocks =
+      *(program->mutable_parameter_blocks());
+
+  for (int i = 0; i < parameter_blocks.size(); ++i) {
+    constraints.push_back(
+        parameter_block_ordering.GroupId(
+            parameter_blocks[i]->mutable_user_state()));
+  }
+
+  // Renumber the entries of constraints to be contiguous integers as
+  // CAMD requires that the group ids be in the range [0,
+  // parameter_blocks.size() - 1].
+  MapValuesToContiguousRange(constraints.size(), &constraints[0]);
+
+  // Compute a block sparse representation of J'.
+  std::unique_ptr<TripletSparseMatrix> tsm_block_jacobian_transpose(
+      program->CreateJacobianBlockSparsityTranspose());
+
+  cholmod_sparse* block_jacobian_transpose =
+      ss.CreateSparseMatrix(tsm_block_jacobian_transpose.get());
+
+  vector<int> ordering(parameter_blocks.size(), 0);
+  ss.ConstrainedApproximateMinimumDegreeOrdering(block_jacobian_transpose,
+                                                 &constraints[0],
+                                                 &ordering[0]);
+  ss.Free(block_jacobian_transpose);
+
+  const vector<ParameterBlock*> parameter_blocks_copy(parameter_blocks);
+  for (int i = 0; i < program->NumParameterBlocks(); ++i) {
+    parameter_blocks[i] = parameter_blocks_copy[ordering[i]];
+  }
+
+  program->SetParameterOffsetsAndIndex();
+#endif
+}
+
+void MaybeReorderSchurComplementColumnsUsingEigen(
+    const int size_of_first_elimination_group,
+    const ProblemImpl::ParameterMap& parameter_map,
+    Program* program) {
+#if !EIGEN_VERSION_AT_LEAST(3, 2, 2) || !defined(CERES_USE_EIGEN_SPARSE)
+  return;
+#else
+
+  std::unique_ptr<TripletSparseMatrix> tsm_block_jacobian_transpose(
+      program->CreateJacobianBlockSparsityTranspose());
+
+  typedef Eigen::SparseMatrix<int> SparseMatrix;
+  const SparseMatrix block_jacobian =
+      CreateBlockJacobian(*tsm_block_jacobian_transpose);
+  const int num_rows = block_jacobian.rows();
+  const int num_cols = block_jacobian.cols();
+
+  // Vertically partition the jacobian into parameter blocks of type E
+  // and F.
+  const SparseMatrix E =
+      block_jacobian.block(0,
+                           0,
+                           num_rows,
+                           size_of_first_elimination_group);
+  const SparseMatrix F =
+      block_jacobian.block(0,
+                           size_of_first_elimination_group,
+                           num_rows,
+                           num_cols - size_of_first_elimination_group);
+
+  // Block sparsity pattern of the schur complement.
+  const SparseMatrix block_schur_complement =
+      F.transpose() * F - F.transpose() * E * E.transpose() * F;
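+  // Because the first elimination group is an independent set (this is
+  // ensured in ReorderProgramForSchurTypeLinearSolver), E'E is block
+  // diagonal, so F'E E'F has the same block sparsity as F'E (E'E)^-1 E'F and
+  // the expression above matches the nonzero pattern of the true Schur
+  // complement F'F - F'E (E'E)^-1 E'F.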
+
+  Eigen::AMDOrdering<int> amd_ordering;
+  Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic, int> perm;
+  amd_ordering(block_schur_complement, perm);
+
+  const vector<ParameterBlock*>& parameter_blocks = program->parameter_blocks();
+  vector<ParameterBlock*> ordering(num_cols);
+
+  // The ordering of the first size_of_first_elimination_group blocks
+  // does not matter, so we preserve the existing ordering.
+  for (int i = 0; i < size_of_first_elimination_group; ++i) {
+    ordering[i] = parameter_blocks[i];
+  }
+
+  // For the rest of the blocks, use the ordering computed using AMD.
+  for (int i = 0; i < block_schur_complement.cols(); ++i) {
+    ordering[size_of_first_elimination_group + i] =
+        parameter_blocks[size_of_first_elimination_group + perm.indices()[i]];
+  }
+
+  swap(*program->mutable_parameter_blocks(), ordering);
+  program->SetParameterOffsetsAndIndex();
+#endif
+}
+
+bool ReorderProgramForSchurTypeLinearSolver(
+    const LinearSolverType linear_solver_type,
+    const SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
+    const ProblemImpl::ParameterMap& parameter_map,
+    ParameterBlockOrdering* parameter_block_ordering,
+    Program* program,
+    string* error) {
+  if (parameter_block_ordering->NumElements() !=
+      program->NumParameterBlocks()) {
+    *error = StringPrintf(
+        "The program has %d parameter blocks, but the parameter block "
+        "ordering has %d parameter blocks.",
+        program->NumParameterBlocks(),
+        parameter_block_ordering->NumElements());
+    return false;
+  }
+
+  if (parameter_block_ordering->NumGroups() == 1) {
+    // If the user supplied a parameter_block_ordering with just one
+    // group, it is equivalent to the user supplying NULL as a
+    // parameter_block_ordering. Ceres is completely free to choose the
+    // parameter block ordering as it sees fit. For Schur type solvers,
+    // this means that the user wishes for Ceres to identify the
+    // e_blocks, which we do by computing a maximal independent set.
+    vector<ParameterBlock*> schur_ordering;
+    const int size_of_first_elimination_group =
+        ComputeStableSchurOrdering(*program, &schur_ordering);
+
+    CHECK_EQ(schur_ordering.size(), program->NumParameterBlocks())
+        << "Congratulations, you found a Ceres bug! Please report this error "
+        << "to the developers.";
+
+    // Update the parameter_block_ordering object.
+    for (int i = 0; i < schur_ordering.size(); ++i) {
+      double* parameter_block = schur_ordering[i]->mutable_user_state();
+      const int group_id = (i < size_of_first_elimination_group) ? 0 : 1;
+      parameter_block_ordering->AddElementToGroup(parameter_block, group_id);
+    }
+
+    // We could call ApplyOrdering but this is cheaper and
+    // simpler.
+    swap(*program->mutable_parameter_blocks(), schur_ordering);
+  } else {
+    // The user provided an ordering with more than one elimination
+    // group.
+
+    // Verify that the first elimination group is an independent set.
+    const set<double*>& first_elimination_group =
+        parameter_block_ordering
+        ->group_to_elements()
+        .begin()
+        ->second;
+    if (!program->IsParameterBlockSetIndependent(first_elimination_group)) {
+      *error =
+          StringPrintf("The first elimination group in the parameter block "
+                       "ordering of size %zd is not an independent set",
+                       first_elimination_group.size());
+      return false;
+    }
+
+    if (!ApplyOrdering(parameter_map,
+                       *parameter_block_ordering,
+                       program,
+                       error)) {
+      return false;
+    }
+  }
+
+  program->SetParameterOffsetsAndIndex();
+
+  const int size_of_first_elimination_group =
+      parameter_block_ordering->group_to_elements().begin()->second.size();
+
+  if (linear_solver_type == SPARSE_SCHUR) {
+    if (sparse_linear_algebra_library_type == SUITE_SPARSE) {
+      MaybeReorderSchurComplementColumnsUsingSuiteSparse(
+          *parameter_block_ordering,
+          program);
+    } else if (sparse_linear_algebra_library_type == EIGEN_SPARSE) {
+      MaybeReorderSchurComplementColumnsUsingEigen(
+          size_of_first_elimination_group,
+          parameter_map,
+          program);
+    }
+  }
+
+  // Schur type solvers also require that their residual blocks be
+  // lexicographically ordered.
+  if (!LexicographicallyOrderResidualBlocks(size_of_first_elimination_group,
+                                            program,
+                                            error)) {
+    return false;
+  }
+
+  return true;
+}
+
+bool ReorderProgramForSparseNormalCholesky(
+    const SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
+    const ParameterBlockOrdering& parameter_block_ordering,
+    Program* program,
+    string* error) {
+  if (parameter_block_ordering.NumElements() != program->NumParameterBlocks()) {
+    *error = StringPrintf(
+        "The program has %d parameter blocks, but the parameter block "
+        "ordering has %d parameter blocks.",
+        program->NumParameterBlocks(),
+        parameter_block_ordering.NumElements());
+    return false;
+  }
+
+  // Compute a block sparse representation of J'.
+  std::unique_ptr<TripletSparseMatrix> tsm_block_jacobian_transpose(
+      program->CreateJacobianBlockSparsityTranspose());
+
+  vector<int> ordering(program->NumParameterBlocks(), 0);
+  vector<ParameterBlock*>& parameter_blocks =
+      *(program->mutable_parameter_blocks());
+
+  if (sparse_linear_algebra_library_type == SUITE_SPARSE) {
+    OrderingForSparseNormalCholeskyUsingSuiteSparse(
+        *tsm_block_jacobian_transpose,
+        parameter_blocks,
+        parameter_block_ordering,
+        &ordering[0]);
+  } else if (sparse_linear_algebra_library_type == CX_SPARSE) {
+    OrderingForSparseNormalCholeskyUsingCXSparse(
+        *tsm_block_jacobian_transpose,
+        &ordering[0]);
+  } else if (sparse_linear_algebra_library_type == ACCELERATE_SPARSE) {
+    // Accelerate does not provide a function to perform reordering without
+    // performing a full symbolic factorisation.  As such, we have nothing
+    // to gain from trying to reorder the problem here, as it will happen
+    // in AppleAccelerateCholesky::Factorize() (once) and reordering here
+    // would involve performing two symbolic factorisations instead of one
+    // which would have a negative overall impact on performance.
+    return true;
+  } else if (sparse_linear_algebra_library_type == EIGEN_SPARSE) {
+#if EIGEN_VERSION_AT_LEAST(3, 2, 2)
+    OrderingForSparseNormalCholeskyUsingEigenSparse(
+        *tsm_block_jacobian_transpose,
+        &ordering[0]);
+#else
+    // For Eigen versions less than 3.2.2, there is nothing to do as
+    // older versions of Eigen do not expose a method for doing
+    // symbolic analysis on pre-ordered matrices, so a block
+    // pre-ordering is a bit pointless.
+
+    return true;
+#endif
+  }
+
+  // Apply ordering.
+  const vector<ParameterBlock*> parameter_blocks_copy(parameter_blocks);
+  for (int i = 0; i < program->NumParameterBlocks(); ++i) {
+    parameter_blocks[i] = parameter_blocks_copy[ordering[i]];
+  }
+
+  program->SetParameterOffsetsAndIndex();
+  return true;
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/reorder_program.h b/internal/ceres/reorder_program.h
new file mode 100644
index 0000000..36e5d16
--- /dev/null
+++ b/internal/ceres/reorder_program.h
@@ -0,0 +1,101 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_REORDER_PROGRAM_H_
+#define CERES_INTERNAL_REORDER_PROGRAM_H_
+
+#include <string>
+#include "ceres/internal/port.h"
+#include "ceres/parameter_block_ordering.h"
+#include "ceres/problem_impl.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+class Program;
+
+// Reorder the parameter blocks in the program using the given ordering.
+bool ApplyOrdering(const ProblemImpl::ParameterMap& parameter_map,
+                   const ParameterBlockOrdering& ordering,
+                   Program* program,
+                   std::string* error);
+
+// Reorder the residuals for program, if necessary, so that the residuals
+// involving each E block occur together. This is a necessary condition for the
+// Schur eliminator, which works on these "row blocks" in the jacobian.
+bool LexicographicallyOrderResidualBlocks(int size_of_first_elimination_group,
+                                          Program* program,
+                                          std::string* error);
+
+// Schur type solvers require that all parameter blocks eliminated
+// by the Schur eliminator occur before others and the residuals be
+// sorted in lexicographic order of their parameter blocks.
+//
+// If the parameter_block_ordering only contains one elimination
+// group then a maximal independent set is computed and used as the
+// first elimination group, otherwise the user's ordering is used.
+//
+// If the linear solver type is SPARSE_SCHUR and support for
+// constrained fill-reducing ordering is available in the sparse
+// linear algebra library (SuiteSparse version >= 4.2.0) then
+// columns of the schur complement matrix are ordered to reduce the
+// fill-in in the Cholesky factorization.
+//
+// Upon return, ordering contains the parameter block ordering that
+// was used to order the program.
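+//
+// A sketch of a typical call site (the exact plumbing in the solver code may
+// differ):
+//
+//   std::string error;
+//   if (!ReorderProgramForSchurTypeLinearSolver(
+//           options.linear_solver_type,
+//           options.sparse_linear_algebra_library_type,
+//           problem_impl->parameter_map(),
+//           options.linear_solver_ordering.get(),
+//           program,
+//           &error)) {
+//     LOG(ERROR) << error;
+//   }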
+bool ReorderProgramForSchurTypeLinearSolver(
+    LinearSolverType linear_solver_type,
+    SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
+    const ProblemImpl::ParameterMap& parameter_map,
+    ParameterBlockOrdering* parameter_block_ordering,
+    Program* program,
+    std::string* error);
+
+// Sparse Cholesky factorization routines, when factorizing the Jacobian
+// matrix, reorder its columns to reduce the fill-in. Compute this
+// permutation and reorder the parameter blocks accordingly.
+//
+// When using SuiteSparse, if the parameter_block_ordering contains
+// more than one elimination group and support for constrained
+// fill-reducing ordering is available in the sparse linear algebra
+// library (SuiteSparse version >= 4.2.0) then the fill reducing
+// ordering will take it into account, otherwise it will be ignored.
+bool ReorderProgramForSparseNormalCholesky(
+    SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
+    const ParameterBlockOrdering& parameter_block_ordering,
+    Program* program,
+    std::string* error);
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_REORDER_PROGRAM_H_
diff --git a/internal/ceres/reorder_program_test.cc b/internal/ceres/reorder_program_test.cc
new file mode 100644
index 0000000..cf3e9f6
--- /dev/null
+++ b/internal/ceres/reorder_program_test.cc
@@ -0,0 +1,253 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/reorder_program.h"
+
+#include "ceres/parameter_block.h"
+#include "ceres/problem_impl.h"
+#include "ceres/program.h"
+#include "ceres/sized_cost_function.h"
+#include "ceres/solver.h"
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+using std::vector;
+
+// Templated base class for the CostFunction signatures.
+template <int kNumResiduals, int... Ns>
+class MockCostFunctionBase : public SizedCostFunction<kNumResiduals, Ns...> {
+ public:
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** jacobians) const {
+    // Do nothing. This is never called.
+    return true;
+  }
+};
+
+class UnaryCostFunction : public MockCostFunctionBase<2, 1> {};
+class BinaryCostFunction : public MockCostFunctionBase<2, 1, 1> {};
+class TernaryCostFunction : public MockCostFunctionBase<2, 1, 1, 1> {};
+
+TEST(_, ReorderResidualBlockNormalFunction) {
+  ProblemImpl problem;
+  double x;
+  double y;
+  double z;
+
+  problem.AddParameterBlock(&x, 1);
+  problem.AddParameterBlock(&y, 1);
+  problem.AddParameterBlock(&z, 1);
+
+  problem.AddResidualBlock(new UnaryCostFunction(), NULL, &x);
+  problem.AddResidualBlock(new BinaryCostFunction(), NULL, &z, &x);
+  problem.AddResidualBlock(new BinaryCostFunction(), NULL, &z, &y);
+  problem.AddResidualBlock(new UnaryCostFunction(), NULL, &z);
+  problem.AddResidualBlock(new BinaryCostFunction(), NULL, &x, &y);
+  problem.AddResidualBlock(new UnaryCostFunction(), NULL, &y);
+
+  ParameterBlockOrdering* linear_solver_ordering = new ParameterBlockOrdering;
+  linear_solver_ordering->AddElementToGroup(&x, 0);
+  linear_solver_ordering->AddElementToGroup(&y, 0);
+  linear_solver_ordering->AddElementToGroup(&z, 1);
+
+  Solver::Options options;
+  options.linear_solver_type = DENSE_SCHUR;
+  options.linear_solver_ordering.reset(linear_solver_ordering);
+
+  const vector<ResidualBlock*>& residual_blocks =
+      problem.program().residual_blocks();
+
+  vector<ResidualBlock*> expected_residual_blocks;
+
+  // This is a bit fragile, but it serves the purpose. We know the
+  // bucketing algorithm that the reordering function uses, so we
+  // expect the order for residual blocks for each e_block to be
+  // filled in reverse.
+  expected_residual_blocks.push_back(residual_blocks[4]);
+  expected_residual_blocks.push_back(residual_blocks[1]);
+  expected_residual_blocks.push_back(residual_blocks[0]);
+  expected_residual_blocks.push_back(residual_blocks[5]);
+  expected_residual_blocks.push_back(residual_blocks[2]);
+  expected_residual_blocks.push_back(residual_blocks[3]);
+
+  Program* program = problem.mutable_program();
+  program->SetParameterOffsetsAndIndex();
+
+  std::string message;
+  EXPECT_TRUE(LexicographicallyOrderResidualBlocks(
+                  2,
+                  problem.mutable_program(),
+                  &message));
+  EXPECT_EQ(residual_blocks.size(), expected_residual_blocks.size());
+  for (int i = 0; i < expected_residual_blocks.size(); ++i) {
+    EXPECT_EQ(residual_blocks[i], expected_residual_blocks[i]);
+  }
+}
+
+TEST(_, ApplyOrderingOrderingTooSmall) {
+  ProblemImpl problem;
+  double x;
+  double y;
+  double z;
+
+  problem.AddParameterBlock(&x, 1);
+  problem.AddParameterBlock(&y, 1);
+  problem.AddParameterBlock(&z, 1);
+
+  ParameterBlockOrdering linear_solver_ordering;
+  linear_solver_ordering.AddElementToGroup(&x, 0);
+  linear_solver_ordering.AddElementToGroup(&y, 1);
+
+  Program program(problem.program());
+  std::string message;
+  EXPECT_FALSE(ApplyOrdering(problem.parameter_map(),
+                             linear_solver_ordering,
+                             &program,
+                             &message));
+}
+
+TEST(_, ApplyOrderingNormal) {
+  ProblemImpl problem;
+  double x;
+  double y;
+  double z;
+
+  problem.AddParameterBlock(&x, 1);
+  problem.AddParameterBlock(&y, 1);
+  problem.AddParameterBlock(&z, 1);
+
+  ParameterBlockOrdering linear_solver_ordering;
+  linear_solver_ordering.AddElementToGroup(&x, 0);
+  linear_solver_ordering.AddElementToGroup(&y, 2);
+  linear_solver_ordering.AddElementToGroup(&z, 1);
+
+  Program* program = problem.mutable_program();
+  std::string message;
+
+  EXPECT_TRUE(ApplyOrdering(problem.parameter_map(),
+                            linear_solver_ordering,
+                            program,
+                            &message));
+  const vector<ParameterBlock*>& parameter_blocks = program->parameter_blocks();
+
+  EXPECT_EQ(parameter_blocks.size(), 3);
+  EXPECT_EQ(parameter_blocks[0]->user_state(), &x);
+  EXPECT_EQ(parameter_blocks[1]->user_state(), &z);
+  EXPECT_EQ(parameter_blocks[2]->user_state(), &y);
+}
+
+#ifndef CERES_NO_SUITESPARSE
+class ReorderProgramForSparseNormalCholeskyUsingSuiteSparseTest :
+      public ::testing::Test {
+ protected:
+  void SetUp() {
+    problem_.AddResidualBlock(new UnaryCostFunction(), NULL, &x_);
+    problem_.AddResidualBlock(new BinaryCostFunction(), NULL, &z_, &x_);
+    problem_.AddResidualBlock(new BinaryCostFunction(), NULL, &z_, &y_);
+    problem_.AddResidualBlock(new UnaryCostFunction(), NULL, &z_);
+    problem_.AddResidualBlock(new BinaryCostFunction(), NULL, &x_, &y_);
+    problem_.AddResidualBlock(new UnaryCostFunction(), NULL, &y_);
+  }
+
+  void ComputeAndValidateOrdering(
+      const ParameterBlockOrdering& linear_solver_ordering) {
+    Program* program = problem_.mutable_program();
+    vector<ParameterBlock*> unordered_parameter_blocks =
+        program->parameter_blocks();
+
+    std::string error;
+    EXPECT_TRUE(ReorderProgramForSparseNormalCholesky(
+                    ceres::SUITE_SPARSE,
+                    linear_solver_ordering,
+                    program,
+                    &error));
+    const vector<ParameterBlock*>& ordered_parameter_blocks =
+        program->parameter_blocks();
+    EXPECT_EQ(ordered_parameter_blocks.size(),
+              unordered_parameter_blocks.size());
+
+    EXPECT_THAT(unordered_parameter_blocks,
+                ::testing::UnorderedElementsAreArray(ordered_parameter_blocks));
+  }
+
+  ProblemImpl problem_;
+  double x_;
+  double y_;
+  double z_;
+};
+
+TEST_F(ReorderProgramForSparseNormalCholeskyUsingSuiteSparseTest,
+       EverythingInGroupZero) {
+  ParameterBlockOrdering linear_solver_ordering;
+  linear_solver_ordering.AddElementToGroup(&x_, 0);
+  linear_solver_ordering.AddElementToGroup(&y_, 0);
+  linear_solver_ordering.AddElementToGroup(&z_, 0);
+
+  ComputeAndValidateOrdering(linear_solver_ordering);
+}
+
+TEST_F(ReorderProgramForSparseNormalCholeskyUsingSuiteSparseTest,
+       ContiguousGroups) {
+  ParameterBlockOrdering linear_solver_ordering;
+  linear_solver_ordering.AddElementToGroup(&x_, 0);
+  linear_solver_ordering.AddElementToGroup(&y_, 1);
+  linear_solver_ordering.AddElementToGroup(&z_, 2);
+
+  ComputeAndValidateOrdering(linear_solver_ordering);
+}
+
+TEST_F(ReorderProgramForSparseNormalCholeskyUsingSuiteSparseTest,
+       GroupsWithGaps) {
+  ParameterBlockOrdering linear_solver_ordering;
+  linear_solver_ordering.AddElementToGroup(&x_, 0);
+  linear_solver_ordering.AddElementToGroup(&y_, 2);
+  linear_solver_ordering.AddElementToGroup(&z_, 2);
+
+  ComputeAndValidateOrdering(linear_solver_ordering);
+}
+
+TEST_F(ReorderProgramForSparseNormalCholeskyUsingSuiteSparseTest,
+       NonContiguousStartingAtTwo) {
+  ParameterBlockOrdering linear_solver_ordering;
+  linear_solver_ordering.AddElementToGroup(&x_, 2);
+  linear_solver_ordering.AddElementToGroup(&y_, 4);
+  linear_solver_ordering.AddElementToGroup(&z_, 4);
+
+  ComputeAndValidateOrdering(linear_solver_ordering);
+}
+#endif  // CERES_NO_SUITESPARSE
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/residual_block.cc b/internal/ceres/residual_block.cc
new file mode 100644
index 0000000..7582e92
--- /dev/null
+++ b/internal/ceres/residual_block.cc
@@ -0,0 +1,217 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//         sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/residual_block.h"
+
+#include <algorithm>
+#include <cstddef>
+#include <vector>
+#include "ceres/corrector.h"
+#include "ceres/parameter_block.h"
+#include "ceres/residual_block_utils.h"
+#include "ceres/cost_function.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/fixed_array.h"
+#include "ceres/local_parameterization.h"
+#include "ceres/loss_function.h"
+#include "ceres/small_blas.h"
+
+using Eigen::Dynamic;
+
+namespace ceres {
+namespace internal {
+
+ResidualBlock::ResidualBlock(
+    const CostFunction* cost_function, const LossFunction* loss_function,
+    const std::vector<ParameterBlock*>& parameter_blocks, int index)
+    : cost_function_(cost_function),
+      loss_function_(loss_function),
+      parameter_blocks_(
+          new ParameterBlock*[cost_function->parameter_block_sizes().size()]),
+      index_(index) {
+  CHECK(cost_function_ != nullptr);
+  std::copy(parameter_blocks.begin(),
+            parameter_blocks.end(),
+            parameter_blocks_.get());
+}
+
+bool ResidualBlock::Evaluate(const bool apply_loss_function,
+                             double* cost,
+                             double* residuals,
+                             double** jacobians,
+                             double* scratch) const {
+  const int num_parameter_blocks = NumParameterBlocks();
+  const int num_residuals = cost_function_->num_residuals();
+
+  // Collect the parameters from their blocks. This will rarely allocate, since
+  // residuals taking more than 8 parameter block arguments are rare.
+  FixedArray<const double*, 8> parameters(num_parameter_blocks);
+  for (int i = 0; i < num_parameter_blocks; ++i) {
+    parameters[i] = parameter_blocks_[i]->state();
+  }
+
+  // Put pointers into the scratch space into global_jacobians as appropriate.
+  FixedArray<double*, 8> global_jacobians(num_parameter_blocks);
+  if (jacobians != NULL) {
+    for (int i = 0; i < num_parameter_blocks; ++i) {
+      const ParameterBlock* parameter_block = parameter_blocks_[i];
+      if (jacobians[i] != NULL &&
+          parameter_block->LocalParameterizationJacobian() != NULL) {
+        global_jacobians[i] = scratch;
+        scratch += num_residuals * parameter_block->Size();
+      } else {
+        global_jacobians[i] = jacobians[i];
+      }
+    }
+  }
+
+  // If the caller didn't request residuals, use the scratch space for them.
+  bool outputting_residuals = (residuals != NULL);
+  if (!outputting_residuals) {
+    residuals = scratch;
+  }
+
+  // Invalidate the evaluation buffers so that we can check them after
+  // the CostFunction::Evaluate call, to see if all the return values
+  // that were required were written to and that they are finite.
+  double** eval_jacobians = (jacobians != NULL) ? global_jacobians.get() : NULL;
+
+  InvalidateEvaluation(*this, cost, residuals, eval_jacobians);
+
+  if (!cost_function_->Evaluate(parameters.get(), residuals, eval_jacobians)) {
+    return false;
+  }
+
+  if (!IsEvaluationValid(*this,
+                         parameters.get(),
+                         cost,
+                         residuals,
+                         eval_jacobians)) {
+    std::string message =
+        "\n\n"
+        "Error in evaluating the ResidualBlock.\n\n"
+        "There are two possible reasons. Either the CostFunction did not evaluate and fill all    \n"  // NOLINT
+        "residual and jacobians that were requested or there was a non-finite value (nan/infinite)\n"  // NOLINT
+        "generated during the or jacobian computation. \n\n" +
+        EvaluationToString(*this,
+                           parameters.get(),
+                           cost,
+                           residuals,
+                           eval_jacobians);
+    LOG(WARNING) << message;
+    return false;
+  }
+
+  double squared_norm = VectorRef(residuals, num_residuals).squaredNorm();
+
+  // Update the jacobians with the local parameterizations.
+  if (jacobians != NULL) {
+    for (int i = 0; i < num_parameter_blocks; ++i) {
+      if (jacobians[i] != NULL) {
+        const ParameterBlock* parameter_block = parameter_blocks_[i];
+
+        // Apply local reparameterization to the jacobians.
+        if (parameter_block->LocalParameterizationJacobian() != NULL) {
+          // jacobians[i] = global_jacobians[i] * global_to_local_jacobian.
+          MatrixMatrixMultiply<Dynamic, Dynamic, Dynamic, Dynamic, 0>(
+              global_jacobians[i],
+              num_residuals,
+              parameter_block->Size(),
+              parameter_block->LocalParameterizationJacobian(),
+              parameter_block->Size(),
+              parameter_block->LocalSize(),
+              jacobians[i], 0, 0,  num_residuals, parameter_block->LocalSize());
+        }
+      }
+    }
+  }
+
+  if (loss_function_ == NULL || !apply_loss_function) {
+    *cost = 0.5 * squared_norm;
+    return true;
+  }
+
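+  // LossFunction::Evaluate(s, rho) reports rho[0] = loss(s), rho[1] = loss'(s)
+  // and rho[2] = loss''(s) evaluated at s = squared_norm, so the robustified
+  // cost of this block is 0.5 * loss(||residuals||^2).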
+  double rho[3];
+  loss_function_->Evaluate(squared_norm, rho);
+  *cost = 0.5 * rho[0];
+
+  // No jacobians and not outputting residuals? All done. Doing an early exit
+  // here avoids constructing the "Corrector" object below in a common case.
+  if (jacobians == NULL && !outputting_residuals) {
+    return true;
+  }
+
+  // Correct for the effects of the loss function. The jacobians need to be
+  // corrected before the residuals, since they use the uncorrected residuals.
+  Corrector correct(squared_norm, rho);
+  if (jacobians != NULL) {
+    for (int i = 0; i < num_parameter_blocks; ++i) {
+      if (jacobians[i] != NULL) {
+        const ParameterBlock* parameter_block = parameter_blocks_[i];
+
+        // Correct the jacobians for the loss function.
+        correct.CorrectJacobian(num_residuals,
+                                parameter_block->LocalSize(),
+                                residuals,
+                                jacobians[i]);
+      }
+    }
+  }
+
+  // Correct the residuals with the loss function.
+  if (outputting_residuals) {
+    correct.CorrectResiduals(num_residuals, residuals);
+  }
+  return true;
+}
+
+int ResidualBlock::NumScratchDoublesForEvaluate() const {
+  // Compute the amount of scratch space needed to store the full-sized
+  // jacobians. For parameters that have no local parameterization, no storage
+  // is needed and the passed-in jacobian array is used directly. Also include
+  // space to store the residuals, which is needed for cost-only evaluations.
+  // This is slightly pessimistic, since both won't be needed all the time, but
+  // the amount of excess should not cause problems for the caller.
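+  //
+  // As a worked example (mirroring the expectations in residual_block_test.cc):
+  // with 3 residuals and parameter blocks of sizes 2, 3 and 4, where the size-2
+  // and size-4 blocks use local parameterizations and the size-3 block does
+  // not, this returns (1 + 2 + 4) * 3 = 21 doubles.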
+  int num_parameters = NumParameterBlocks();
+  int scratch_doubles = 1;
+  for (int i = 0; i < num_parameters; ++i) {
+    const ParameterBlock* parameter_block = parameter_blocks_[i];
+    if (!parameter_block->IsConstant() &&
+        parameter_block->LocalParameterizationJacobian() != NULL) {
+      scratch_doubles += parameter_block->Size();
+    }
+  }
+  scratch_doubles *= NumResiduals();
+  return scratch_doubles;
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/residual_block.h b/internal/ceres/residual_block.h
new file mode 100644
index 0000000..6964c6d
--- /dev/null
+++ b/internal/ceres/residual_block.h
@@ -0,0 +1,149 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//         keir@google.com (Keir Mierle)
+//
+// Purpose: Class and struct definitions for parameter and residual blocks.
+
+#ifndef CERES_INTERNAL_RESIDUAL_BLOCK_H_
+#define CERES_INTERNAL_RESIDUAL_BLOCK_H_
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "ceres/cost_function.h"
+#include "ceres/internal/port.h"
+#include "ceres/stringprintf.h"
+#include "ceres/types.h"
+
+namespace ceres {
+
+class LossFunction;
+
+namespace internal {
+
+class ParameterBlock;
+
+// A term in the least squares problem. The mathematical form of each term in
+// the overall least-squares cost function is:
+//
+//    1
+//   --- loss_function( || cost_function(block1, block2, ...) ||^2  ),
+//    2
+//
+// Storing the cost function and the loss function separately permits
+// optimizing the problem with standard non-linear least squares techniques,
+// without requiring a more general non-linear solver.
+//
+// The residual block stores pointers to but does not own the cost functions,
+// loss functions, and parameter blocks.
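+//
+// For example, a block built from a cost function f(x, y) returning a
+// 3-dimensional residual and a robust loss rho (e.g. ceres::HuberLoss)
+// contributes 0.5 * rho(||f(x, y)||^2) to the overall objective.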
+class ResidualBlock {
+ public:
+  // Construct the residual block with the given cost/loss functions. Loss may
+  // be null. The index is the index of the residual block in the Program's
+  // residual_blocks array.
+  ResidualBlock(const CostFunction* cost_function,
+                const LossFunction* loss_function,
+                const std::vector<ParameterBlock*>& parameter_blocks,
+                int index);
+
+  // Evaluates the residual term, storing the scalar cost in *cost, the residual
+  // components in *residuals, and the jacobians between the parameters and
+  // residuals in jacobians[i], in row-major order. If residuals is NULL, the
+  // residuals are not computed. If jacobians is NULL, no jacobians are
+  // computed. If jacobians[i] is NULL, then the jacobian for that parameter is
+  // not computed.
+  //
+  // Evaluate needs scratch space which must be supplied by the caller via
+  // scratch. The array should have at least NumScratchDoublesForEvaluate()
+  // space available.
+  //
+  // The return value indicates the success or failure. If the function returns
+  // false, the caller should expect the output memory locations to have
+  // been modified.
+  //
+  // The returned cost and jacobians have had robustification and local
+  // parameterizations applied already; for example, the jacobian for a
+  // 4-dimensional quaternion parameter using the "QuaternionParameterization"
+  // is num_residuals by 3 instead of num_residuals by 4.
+  //
+  // apply_loss_function, as the name implies, allows the user to switch
+  // the application of the loss function on and off.
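+  //
+  // A minimal usage sketch (illustrative only; "block" stands for an already
+  // constructed ResidualBlock), mirroring the cost-only evaluation exercised
+  // in residual_block_test.cc:
+  //
+  //   std::vector<double> scratch(block.NumScratchDoublesForEvaluate());
+  //   double cost = 0.0;
+  //   // residuals and jacobians may be NULL when only the cost is needed.
+  //   bool ok = block.Evaluate(/* apply_loss_function = */ true,
+  //                            &cost, NULL, NULL, scratch.data());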
+  bool Evaluate(bool apply_loss_function,
+                double* cost,
+                double* residuals,
+                double** jacobians,
+                double* scratch) const;
+
+
+  const CostFunction* cost_function() const { return cost_function_; }
+  const LossFunction* loss_function() const { return loss_function_; }
+
+  // Access the parameter blocks for this residual. The array has size
+  // NumParameterBlocks().
+  ParameterBlock* const* parameter_blocks() const {
+    return parameter_blocks_.get();
+  }
+
+  // Number of variable blocks that this residual term depends on.
+  int NumParameterBlocks() const {
+    return cost_function_->parameter_block_sizes().size();
+  }
+
+  // The size of the residual vector returned by this residual function.
+  int NumResiduals() const { return cost_function_->num_residuals(); }
+
+  // The minimum amount of scratch space needed to pass to Evaluate().
+  int NumScratchDoublesForEvaluate() const;
+
+  // This residual block's index in an array.
+  int index() const { return index_; }
+  void set_index(int index) { index_ = index; }
+
+  std::string ToString() const {
+    return StringPrintf("{residual block; index=%d}", index_);
+  }
+
+ private:
+  const CostFunction* cost_function_;
+  const LossFunction* loss_function_;
+  std::unique_ptr<ParameterBlock*[]> parameter_blocks_;
+
+  // The index of the residual, typically in a Program. This is only to permit
+  // switching from a ResidualBlock* to an index in the Program's array, needed
+  // to do efficient removals.
+  int32_t index_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_RESIDUAL_BLOCK_H_
diff --git a/internal/ceres/residual_block_test.cc b/internal/ceres/residual_block_test.cc
new file mode 100644
index 0000000..3a33be7
--- /dev/null
+++ b/internal/ceres/residual_block_test.cc
@@ -0,0 +1,329 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include "ceres/residual_block.h"
+
+#include <cstdint>
+#include "gtest/gtest.h"
+#include "ceres/parameter_block.h"
+#include "ceres/sized_cost_function.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/local_parameterization.h"
+
+namespace ceres {
+namespace internal {
+
+using std::vector;
+
+// Trivial cost function that accepts three arguments.
+class TernaryCostFunction: public CostFunction {
+ public:
+  TernaryCostFunction(int num_residuals,
+                      int32_t parameter_block1_size,
+                      int32_t parameter_block2_size,
+                      int32_t parameter_block3_size) {
+    set_num_residuals(num_residuals);
+    mutable_parameter_block_sizes()->push_back(parameter_block1_size);
+    mutable_parameter_block_sizes()->push_back(parameter_block2_size);
+    mutable_parameter_block_sizes()->push_back(parameter_block3_size);
+  }
+
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** jacobians) const {
+    for (int i = 0; i < num_residuals(); ++i) {
+      residuals[i] = i;
+    }
+    if (jacobians) {
+      for (int k = 0; k < 3; ++k) {
+        if (jacobians[k] != NULL) {
+          MatrixRef jacobian(jacobians[k],
+                             num_residuals(),
+                             parameter_block_sizes()[k]);
+          jacobian.setConstant(k);
+        }
+      }
+    }
+    return true;
+  }
+};
+
+TEST(ResidualBlock, EvaluateWithNoLossFunctionOrLocalParameterizations) {
+  double scratch[64];
+
+  // Prepare the parameter blocks.
+  double values_x[2];
+  ParameterBlock x(values_x, 2, -1);
+
+  double values_y[3];
+  ParameterBlock y(values_y, 3, -1);
+
+  double values_z[4];
+  ParameterBlock z(values_z, 4, -1);
+
+  vector<ParameterBlock*> parameters;
+  parameters.push_back(&x);
+  parameters.push_back(&y);
+  parameters.push_back(&z);
+
+  TernaryCostFunction cost_function(3, 2, 3, 4);
+
+  // Create the object under test.
+  ResidualBlock residual_block(&cost_function, NULL, parameters, -1);
+
+  // Verify getters.
+  EXPECT_EQ(&cost_function, residual_block.cost_function());
+  EXPECT_EQ(NULL, residual_block.loss_function());
+  EXPECT_EQ(parameters[0], residual_block.parameter_blocks()[0]);
+  EXPECT_EQ(parameters[1], residual_block.parameter_blocks()[1]);
+  EXPECT_EQ(parameters[2], residual_block.parameter_blocks()[2]);
+  EXPECT_EQ(3, residual_block.NumScratchDoublesForEvaluate());
+
+  // Verify cost-only evaluation.
+  double cost;
+  residual_block.Evaluate(true, &cost, NULL, NULL, scratch);
+  EXPECT_EQ(0.5 * (0*0 + 1*1 + 2*2), cost);
+
+  // Verify cost and residual evaluation.
+  double residuals[3];
+  residual_block.Evaluate(true, &cost, residuals, NULL, scratch);
+  EXPECT_EQ(0.5 * (0*0 + 1*1 + 2*2), cost);
+  EXPECT_EQ(0.0, residuals[0]);
+  EXPECT_EQ(1.0, residuals[1]);
+  EXPECT_EQ(2.0, residuals[2]);
+
+  // Verify cost, residual, and jacobian evaluation.
+  cost = 0.0;
+  VectorRef(residuals, 3).setConstant(0.0);
+
+  Matrix jacobian_rx(3, 2);
+  Matrix jacobian_ry(3, 3);
+  Matrix jacobian_rz(3, 4);
+
+  jacobian_rx.setConstant(-1.0);
+  jacobian_ry.setConstant(-1.0);
+  jacobian_rz.setConstant(-1.0);
+
+  double *jacobian_ptrs[3] = {
+    jacobian_rx.data(),
+    jacobian_ry.data(),
+    jacobian_rz.data()
+  };
+
+  residual_block.Evaluate(true, &cost, residuals, jacobian_ptrs, scratch);
+  EXPECT_EQ(0.5 * (0*0 + 1*1 + 2*2), cost);
+  EXPECT_EQ(0.0, residuals[0]);
+  EXPECT_EQ(1.0, residuals[1]);
+  EXPECT_EQ(2.0, residuals[2]);
+
+  EXPECT_TRUE((jacobian_rx.array() == 0.0).all()) << "\n" << jacobian_rx;
+  EXPECT_TRUE((jacobian_ry.array() == 1.0).all()) << "\n" << jacobian_ry;
+  EXPECT_TRUE((jacobian_rz.array() == 2.0).all()) << "\n" << jacobian_rz;
+
+  // Verify cost, residual, and partial jacobian evaluation.
+  cost = 0.0;
+  VectorRef(residuals, 3).setConstant(0.0);
+  jacobian_rx.setConstant(-1.0);
+  jacobian_ry.setConstant(-1.0);
+  jacobian_rz.setConstant(-1.0);
+
+  jacobian_ptrs[1] = NULL;  // Don't compute the jacobian for y.
+
+  residual_block.Evaluate(true, &cost, residuals, jacobian_ptrs, scratch);
+  EXPECT_EQ(0.5 * (0*0 + 1*1 + 2*2), cost);
+  EXPECT_EQ(0.0, residuals[0]);
+  EXPECT_EQ(1.0, residuals[1]);
+  EXPECT_EQ(2.0, residuals[2]);
+
+  EXPECT_TRUE((jacobian_rx.array() ==  0.0).all()) << "\n" << jacobian_rx;
+  EXPECT_TRUE((jacobian_ry.array() == -1.0).all()) << "\n" << jacobian_ry;
+  EXPECT_TRUE((jacobian_rz.array() ==  2.0).all()) << "\n" << jacobian_rz;
+}
+
+// Trivial cost function that accepts three arguments.
+class LocallyParameterizedCostFunction: public SizedCostFunction<3, 2, 3, 4> {
+ public:
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** jacobians) const {
+    for (int i = 0; i < num_residuals(); ++i) {
+      residuals[i] = i;
+    }
+    if (jacobians) {
+      for (int k = 0; k < 3; ++k) {
+        // The jacobians here are full sized, but they are transformed in the
+        // evaluator into the "local" jacobian. In the tests, the "subset
+        // constant" parameterization is used, which should pick out columns
+        // from these jacobians. Put values in the jacobian that make this
+        // obvious; in particular, make the jacobians like this:
+        //
+        //   0 1 2 3 4 ...
+        //   0 1 2 3 4 ...
+        //   0 1 2 3 4 ...
+        //
+        if (jacobians[k] != NULL) {
+          MatrixRef jacobian(jacobians[k],
+                             num_residuals(),
+                             parameter_block_sizes()[k]);
+          for (int j = 0; j < k + 2; ++j) {
+            jacobian.col(j).setConstant(j);
+          }
+        }
+      }
+    }
+    return true;
+  }
+};
+
+TEST(ResidualBlock, EvaluateWithLocalParameterizations) {
+  double scratch[64];
+
+  // Prepare the parameter blocks.
+  double values_x[2];
+  ParameterBlock x(values_x, 2, -1);
+
+  double values_y[3];
+  ParameterBlock y(values_y, 3, -1);
+
+  double values_z[4];
+  ParameterBlock z(values_z, 4, -1);
+
+  vector<ParameterBlock*> parameters;
+  parameters.push_back(&x);
+  parameters.push_back(&y);
+  parameters.push_back(&z);
+
+  // Make x have the first component fixed.
+  vector<int> x_fixed;
+  x_fixed.push_back(0);
+  SubsetParameterization x_parameterization(2, x_fixed);
+  x.SetParameterization(&x_parameterization);
+
+  // Make z have its third component fixed.
+  vector<int> z_fixed;
+  z_fixed.push_back(2);
+  SubsetParameterization z_parameterization(4, z_fixed);
+  z.SetParameterization(&z_parameterization);
+
+  LocallyParameterizedCostFunction cost_function;
+
+  // Create the object under test.
+  ResidualBlock residual_block(&cost_function, NULL, parameters, -1);
+
+  // Verify getters.
+  EXPECT_EQ(&cost_function, residual_block.cost_function());
+  EXPECT_EQ(NULL, residual_block.loss_function());
+  EXPECT_EQ(parameters[0], residual_block.parameter_blocks()[0]);
+  EXPECT_EQ(parameters[1], residual_block.parameter_blocks()[1]);
+  EXPECT_EQ(parameters[2], residual_block.parameter_blocks()[2]);
+  EXPECT_EQ(3*(2 + 4) + 3, residual_block.NumScratchDoublesForEvaluate());
+
+  // Verify cost-only evaluation.
+  double cost;
+  residual_block.Evaluate(true, &cost, NULL, NULL, scratch);
+  EXPECT_EQ(0.5 * (0*0 + 1*1 + 2*2), cost);
+
+  // Verify cost and residual evaluation.
+  double residuals[3];
+  residual_block.Evaluate(true, &cost, residuals, NULL, scratch);
+  EXPECT_EQ(0.5 * (0*0 + 1*1 + 2*2), cost);
+  EXPECT_EQ(0.0, residuals[0]);
+  EXPECT_EQ(1.0, residuals[1]);
+  EXPECT_EQ(2.0, residuals[2]);
+
+  // Verify cost, residual, and jacobian evaluation.
+  cost = 0.0;
+  VectorRef(residuals, 3).setConstant(0.0);
+
+  Matrix jacobian_rx(3, 1);  // Since the first element is fixed.
+  Matrix jacobian_ry(3, 3);
+  Matrix jacobian_rz(3, 3);  // Since the third element is fixed.
+
+  jacobian_rx.setConstant(-1.0);
+  jacobian_ry.setConstant(-1.0);
+  jacobian_rz.setConstant(-1.0);
+
+  double *jacobian_ptrs[3] = {
+    jacobian_rx.data(),
+    jacobian_ry.data(),
+    jacobian_rz.data()
+  };
+
+  residual_block.Evaluate(true, &cost, residuals, jacobian_ptrs, scratch);
+  EXPECT_EQ(0.5 * (0*0 + 1*1 + 2*2), cost);
+  EXPECT_EQ(0.0, residuals[0]);
+  EXPECT_EQ(1.0, residuals[1]);
+  EXPECT_EQ(2.0, residuals[2]);
+
+  Matrix expected_jacobian_rx(3, 1);
+  expected_jacobian_rx << 1.0, 1.0, 1.0;
+
+  Matrix expected_jacobian_ry(3, 3);
+  expected_jacobian_ry << 0.0, 1.0, 2.0,
+                          0.0, 1.0, 2.0,
+                          0.0, 1.0, 2.0;
+
+  Matrix expected_jacobian_rz(3, 3);
+  expected_jacobian_rz << 0.0, 1.0, /* 2.0, */ 3.0,  // 3rd parameter constant.
+                          0.0, 1.0, /* 2.0, */ 3.0,
+                          0.0, 1.0, /* 2.0, */ 3.0;
+
+  EXPECT_EQ(expected_jacobian_rx, jacobian_rx)
+      << "\nExpected:\n" << expected_jacobian_rx
+      << "\nActual:\n"   << jacobian_rx;
+  EXPECT_EQ(expected_jacobian_ry, jacobian_ry)
+      << "\nExpected:\n" << expected_jacobian_ry
+      << "\nActual:\n"   << jacobian_ry;
+  EXPECT_EQ(expected_jacobian_rz, jacobian_rz)
+      << "\nExpected:\n " << expected_jacobian_rz
+      << "\nActual:\n"   << jacobian_rz;
+
+  // Verify cost, residual, and partial jacobian evaluation.
+  cost = 0.0;
+  VectorRef(residuals, 3).setConstant(0.0);
+  jacobian_rx.setConstant(-1.0);
+  jacobian_ry.setConstant(-1.0);
+  jacobian_rz.setConstant(-1.0);
+
+  jacobian_ptrs[1] = NULL;  // Don't compute the jacobian for y.
+
+  residual_block.Evaluate(true, &cost, residuals, jacobian_ptrs, scratch);
+  EXPECT_EQ(0.5 * (0*0 + 1*1 + 2*2), cost);
+  EXPECT_EQ(0.0, residuals[0]);
+  EXPECT_EQ(1.0, residuals[1]);
+  EXPECT_EQ(2.0, residuals[2]);
+
+  EXPECT_EQ(expected_jacobian_rx, jacobian_rx);
+  EXPECT_TRUE((jacobian_ry.array() == -1.0).all()) << "\n" << jacobian_ry;
+  EXPECT_EQ(expected_jacobian_rz, jacobian_rz);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/residual_block_utils.cc b/internal/ceres/residual_block_utils.cc
new file mode 100644
index 0000000..35e928b
--- /dev/null
+++ b/internal/ceres/residual_block_utils.cc
@@ -0,0 +1,142 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/residual_block_utils.h"
+
+#include <cmath>
+#include <cstddef>
+#include <limits>
+#include "ceres/array_utils.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/port.h"
+#include "ceres/parameter_block.h"
+#include "ceres/residual_block.h"
+#include "ceres/stringprintf.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+using std::string;
+
+void InvalidateEvaluation(const ResidualBlock& block,
+                          double* cost,
+                          double* residuals,
+                          double** jacobians) {
+  const int num_parameter_blocks = block.NumParameterBlocks();
+  const int num_residuals = block.NumResiduals();
+
+  InvalidateArray(1, cost);
+  InvalidateArray(num_residuals, residuals);
+  if (jacobians != NULL) {
+    for (int i = 0; i < num_parameter_blocks; ++i) {
+      const int parameter_block_size = block.parameter_blocks()[i]->Size();
+      InvalidateArray(num_residuals * parameter_block_size, jacobians[i]);
+    }
+  }
+}
+
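+// The string produced below, for a block with a single 2-dimensional
+// parameter block x and two residuals (r0, r1), looks schematically like:
+//
+//   Residuals:     r0 r1
+//
+//   Parameter Block 0, size: 2
+//
+//   x[0] | dr0/dx[0] dr1/dx[0]
+//   x[1] | dr0/dx[1] dr1/dx[1]
+//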
+string EvaluationToString(const ResidualBlock& block,
+                          double const* const* parameters,
+                          double* cost,
+                          double* residuals,
+                          double** jacobians) {
+  CHECK(cost != nullptr);
+  CHECK(residuals != nullptr);
+
+  const int num_parameter_blocks = block.NumParameterBlocks();
+  const int num_residuals = block.NumResiduals();
+  string result = "";
+
+  StringAppendF(&result,
+                "Residual Block size: %d parameter blocks x %d residuals\n\n",
+                num_parameter_blocks, num_residuals);
+  result +=
+      "For each parameter block, the value of the parameters are printed in the first column   \n"  // NOLINT
+      "and the value of the jacobian under the corresponding residual. If a ParameterBlock was \n"  // NOLINT
+      "held constant then the corresponding jacobian is printed as 'Not Computed'. If an entry \n"  // NOLINT
+      "of the Jacobian/residual array was requested but was not written to by user code, it is \n"  // NOLINT
+      "indicated by 'Uninitialized'. This is an error. Residuals or Jacobian values evaluating \n"  // NOLINT
+      "to Inf or NaN is also an error.  \n\n"; // NOLINT
+
+  string space = "Residuals:     ";
+  result += space;
+  AppendArrayToString(num_residuals, residuals, &result);
+  StringAppendF(&result, "\n\n");
+
+  for (int i = 0; i < num_parameter_blocks; ++i) {
+    const int parameter_block_size = block.parameter_blocks()[i]->Size();
+    StringAppendF(
+        &result, "Parameter Block %d, size: %d\n", i, parameter_block_size);
+    StringAppendF(&result, "\n");
+    for (int j = 0; j < parameter_block_size; ++j) {
+      AppendArrayToString(1, parameters[i] + j, &result);
+      StringAppendF(&result, "| ");
+      for (int k = 0; k < num_residuals; ++k) {
+        AppendArrayToString(1,
+                            (jacobians != NULL && jacobians[i] != NULL)
+                            ? jacobians[i] + k * parameter_block_size + j
+                            : NULL,
+                            &result);
+      }
+      StringAppendF(&result, "\n");
+    }
+    StringAppendF(&result, "\n");
+  }
+  StringAppendF(&result, "\n");
+  return result;
+}
+
+bool IsEvaluationValid(const ResidualBlock& block,
+                       double const* const* parameters,
+                       double* cost,
+                       double* residuals,
+                       double** jacobians) {
+  const int num_parameter_blocks = block.NumParameterBlocks();
+  const int num_residuals = block.NumResiduals();
+
+  if (!IsArrayValid(num_residuals, residuals)) {
+    return false;
+  }
+
+  if (jacobians != NULL) {
+    for (int i = 0; i < num_parameter_blocks; ++i) {
+      const int parameter_block_size = block.parameter_blocks()[i]->Size();
+      if (!IsArrayValid(num_residuals * parameter_block_size, jacobians[i])) {
+        return false;
+      }
+    }
+  }
+
+  return true;
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/residual_block_utils.h b/internal/ceres/residual_block_utils.h
new file mode 100644
index 0000000..627337f
--- /dev/null
+++ b/internal/ceres/residual_block_utils.h
@@ -0,0 +1,80 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Utility routines for ResidualBlock evaluation.
+//
+// These are useful for detecting two common classes of errors.
+//
+// 1. Uninitialized memory - where the user for some reason did not
+// compute part of a cost/residual/jacobian.
+//
+// 2. Numerical failure while computing the cost/residual/jacobian,
+// e.g. NaN, infinities etc. This is particularly useful since the
+// automatic differentiation code does computations that are not
+// evident to the user and can silently generate hard to debug errors.
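+//
+// For example, a CostFunction that fills the residuals but forgets to write a
+// requested jacobian (see NoJacobianUpdateCostFunction in
+// residual_block_utils_test.cc) is caught by IsEvaluationValid, because
+// InvalidateEvaluation seeded the array with a sentinel value that is treated
+// as uninitialized.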
+
+#ifndef CERES_INTERNAL_RESIDUAL_BLOCK_UTILS_H_
+#define CERES_INTERNAL_RESIDUAL_BLOCK_UTILS_H_
+
+#include <string>
+#include "ceres/internal/port.h"
+
+namespace ceres {
+namespace internal {
+
+class ResidualBlock;
+
+// Invalidate the cost, residual and jacobian arrays (if not NULL).
+void InvalidateEvaluation(const ResidualBlock& block,
+                          double* cost,
+                          double* residuals,
+                          double** jacobians);
+
+// Check that the residual and jacobian arrays contain only valid (finite,
+// initialized) values. Returns true if the evaluation is valid.
+bool IsEvaluationValid(const ResidualBlock& block,
+                       double const* const* parameters,
+                       double* cost,
+                       double* residuals,
+                       double** jacobians);
+
+// Create a string representation of the Residual block containing the
+// value of the parameters, residuals and jacobians if present.
+// Useful for debugging output.
+std::string EvaluationToString(const ResidualBlock& block,
+                               double const* const* parameters,
+                               double* cost,
+                               double* residuals,
+                               double** jacobians);
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_RESIDUAL_BLOCK_UTILS_H_
diff --git a/internal/ceres/residual_block_utils_test.cc b/internal/ceres/residual_block_utils_test.cc
new file mode 100644
index 0000000..3beaa10
--- /dev/null
+++ b/internal/ceres/residual_block_utils_test.cc
@@ -0,0 +1,167 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <cmath>
+#include <limits>
+#include <memory>
+#include "gtest/gtest.h"
+#include "ceres/parameter_block.h"
+#include "ceres/residual_block.h"
+#include "ceres/residual_block_utils.h"
+#include "ceres/cost_function.h"
+#include "ceres/sized_cost_function.h"
+
+namespace ceres {
+namespace internal {
+
+// Checks that ResidualBlock::Evaluate for a unary CostFunction with one
+// residual returns the expected value (is_good).
+void CheckEvaluation(const CostFunction& cost_function, bool is_good) {
+  double x = 1.0;
+  ParameterBlock parameter_block(&x, 1, -1);
+  std::vector<ParameterBlock*> parameter_blocks;
+  parameter_blocks.push_back(&parameter_block);
+
+  ResidualBlock residual_block(&cost_function,
+                               NULL,
+                               parameter_blocks,
+                               -1);
+
+  std::unique_ptr<double[]> scratch(
+      new double[residual_block.NumScratchDoublesForEvaluate()]);
+
+  double cost;
+  double residuals;
+  double jacobian;
+  double* jacobians[] = { &jacobian };
+
+  EXPECT_EQ(residual_block.Evaluate(true,
+                                    &cost,
+                                    &residuals,
+                                    jacobians,
+                                    scratch.get()), is_good);
+}
+
+// A CostFunction that behaves normally, i.e., it computes numerically
+// valid residuals and jacobians.
+class GoodCostFunction: public SizedCostFunction<1, 1> {
+ public:
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** jacobians) const {
+    residuals[0] = 1;
+    if (jacobians != NULL && jacobians[0] != NULL) {
+      jacobians[0][0] = 0.0;
+    }
+    return true;
+  }
+};
+
+// The following four CostFunctions simulate the different ways in
+// which user code can cause ResidualBlock::Evaluate to fail.
+class NoResidualUpdateCostFunction: public SizedCostFunction<1, 1> {
+ public:
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** jacobians) const {
+    // Forget to update the residuals.
+    // residuals[0] = 1;
+    if (jacobians != NULL && jacobians[0] != NULL) {
+      jacobians[0][0] = 0.0;
+    }
+    return true;
+  }
+};
+
+class NoJacobianUpdateCostFunction: public SizedCostFunction<1, 1> {
+ public:
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** jacobians) const {
+    residuals[0] = 1;
+    if (jacobians != NULL && jacobians[0] != NULL) {
+      // Forget to update the jacobians.
+      // jacobians[0][0] = 0.0;
+    }
+    return true;
+  }
+};
+
+class BadResidualCostFunction: public SizedCostFunction<1, 1> {
+ public:
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** jacobians) const {
+    residuals[0] = std::numeric_limits<double>::infinity();
+    if (jacobians != NULL && jacobians[0] != NULL) {
+      jacobians[0][0] = 0.0;
+    }
+    return true;
+  }
+};
+
+class BadJacobianCostFunction: public SizedCostFunction<1, 1> {
+ public:
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** jacobians) const {
+    residuals[0] = 1.0;
+    if (jacobians != NULL && jacobians[0] != NULL) {
+      jacobians[0][0] = std::numeric_limits<double>::quiet_NaN();
+    }
+    return true;
+  }
+};
+
+// Note: It is preferable to write the below test as:
+//
+//  CheckEvaluation(GoodCostFunction(), true);
+//  CheckEvaluation(NoResidualUpdateCostFunction(), false);
+//  CheckEvaluation(NoJacobianUpdateCostFunction(), false);
+//  ...
+//
+// however, there is a bug in the version of GCC on Mac OS X we tested, which
+// requires the objects to be put into named local variables instead of being
+// constructed as temporaries in the call expression.
+TEST(ResidualBlockUtils, CheckAllCombinationsOfBadness) {
+  GoodCostFunction good_fun;
+  CheckEvaluation(good_fun, true);
+  NoResidualUpdateCostFunction no_residual;
+  CheckEvaluation(no_residual, false);
+  NoJacobianUpdateCostFunction no_jacobian;
+  CheckEvaluation(no_jacobian, false);
+  BadResidualCostFunction bad_residual;
+  CheckEvaluation(bad_residual, false);
+  BadJacobianCostFunction bad_jacobian;
+  CheckEvaluation(bad_jacobian, false);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/rotation_test.cc b/internal/ceres/rotation_test.cc
new file mode 100644
index 0000000..d980ba2
--- /dev/null
+++ b/internal/ceres/rotation_test.cc
@@ -0,0 +1,1132 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <cmath>
+#include <limits>
+#include <string>
+#include "ceres/internal/eigen.h"
+#include "ceres/is_close.h"
+#include "ceres/internal/port.h"
+#include "ceres/jet.h"
+#include "ceres/rotation.h"
+#include "ceres/stringprintf.h"
+#include "ceres/test_util.h"
+#include "glog/logging.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+using std::min;
+using std::max;
+using std::numeric_limits;
+using std::string;
+using std::swap;
+
+const double kPi = 3.14159265358979323846;
+const double kHalfSqrt2 = 0.707106781186547524401;
+
+double RandDouble() {
+  double r = rand();
+  return r / RAND_MAX;
+}
+
+// A tolerance value for floating-point comparisons.
+static double const kTolerance = numeric_limits<double>::epsilon() * 10;
+
+// Looser tolerance used for numerically unstable conversions.
+static double const kLooseTolerance = 1e-9;
+
+// Use as:
+// double quaternion[4];
+// EXPECT_THAT(quaternion, IsNormalizedQuaternion());
+MATCHER(IsNormalizedQuaternion, "") {
+  if (arg == NULL) {
+    *result_listener << "Null quaternion";
+    return false;
+  }
+
+  double norm2 = arg[0] * arg[0] + arg[1] * arg[1] +
+      arg[2] * arg[2] + arg[3] * arg[3];
+  if (fabs(norm2 - 1.0) > kTolerance) {
+    *result_listener << "squared norm is " << norm2;
+    return false;
+  }
+
+  return true;
+}
+
+// Use as:
+// double expected_quaternion[4];
+// double actual_quaternion[4];
+// EXPECT_THAT(actual_quaternion, IsNearQuaternion(expected_quaternion));
+MATCHER_P(IsNearQuaternion, expected, "") {
+  if (arg == NULL) {
+    *result_listener << "Null quaternion";
+    return false;
+  }
+
+  // Quaternions are equivalent up to a sign change. So we will compare
+  // both signs before declaring failure.
+  bool near = true;
+  for (int i = 0; i < 4; i++) {
+    if (fabs(arg[i] - expected[i]) > kTolerance) {
+      near = false;
+      break;
+    }
+  }
+
+  if (near) {
+    return true;
+  }
+
+  near = true;
+  for (int i = 0; i < 4; i++) {
+    if (fabs(arg[i] + expected[i]) > kTolerance) {
+      near = false;
+      break;
+    }
+  }
+
+  if (near) {
+    return true;
+  }
+
+  *result_listener << "expected : "
+                   << expected[0] << " "
+                   << expected[1] << " "
+                   << expected[2] << " "
+                   << expected[3] << " "
+                   << "actual : "
+                   << arg[0] << " "
+                   << arg[1] << " "
+                   << arg[2] << " "
+                   << arg[3];
+  return false;
+}
+
+// Use as:
+// double expected_axis_angle[3];
+// double actual_axis_angle[3];
+// EXPECT_THAT(actual_axis_angle, IsNearAngleAxis(expected_axis_angle));
+MATCHER_P(IsNearAngleAxis, expected, "") {
+  if (arg == NULL) {
+    *result_listener << "Null axis/angle";
+    return false;
+  }
+
+  Eigen::Vector3d a(arg[0], arg[1], arg[2]);
+  Eigen::Vector3d e(expected[0], expected[1], expected[2]);
+  const double e_norm = e.norm();
+
+  double delta_norm = numeric_limits<double>::max();
+  if (e_norm > 0) {
+    // Deal with the sign ambiguity near PI. Since the sign can flip,
+    // we take the smaller of the two differences.
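+    // (An angle-axis vector of norm pi and its negation, e.g. (pi, 0, 0) and
+    // (-pi, 0, 0), describe the same rotation, so either sign is a match.)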
+    if (fabs(e_norm - kPi) < kLooseTolerance) {
+      delta_norm = min((a - e).norm(), (a + e).norm()) / e_norm;
+    } else {
+      delta_norm = (a - e).norm() / e_norm;
+    }
+  } else {
+    delta_norm = a.norm();
+  }
+
+  if (delta_norm <= kLooseTolerance) {
+    return true;
+  }
+
+  *result_listener << " arg:"
+                   << " " << arg[0]
+                   << " " << arg[1]
+                   << " " << arg[2]
+                   << " was expected to be:"
+                   << " " << expected[0]
+                   << " " << expected[1]
+                   << " " << expected[2];
+  return false;
+}
+
+// Use as:
+// double matrix[9];
+// EXPECT_THAT(matrix, IsOrthonormal());
+MATCHER(IsOrthonormal, "") {
+  if (arg == NULL) {
+    *result_listener << "Null matrix";
+    return false;
+  }
+
+  for (int c1 = 0; c1 < 3; c1++) {
+    for (int c2 = 0; c2 < 3; c2++) {
+      double v = 0;
+      for (int i = 0; i < 3; i++) {
+        v += arg[i + 3 * c1] * arg[i + 3 * c2];
+      }
+      double expected = (c1 == c2) ? 1 : 0;
+      if (fabs(expected - v) > kTolerance) {
+        *result_listener << "Columns " << c1 << " and " << c2
+                         << " should have dot product " << expected
+                         << " but have " << v;
+        return false;
+      }
+    }
+  }
+
+  return true;
+}
+
+// Use as:
+// double matrix1[9];
+// double matrix2[9];
+// EXPECT_THAT(matrix1, IsNear3x3Matrix(matrix2));
+MATCHER_P(IsNear3x3Matrix, expected, "") {
+  if (arg == NULL) {
+    *result_listener << "Null matrix";
+    return false;
+  }
+
+  for (int i = 0; i < 9; i++) {
+    if (fabs(arg[i] - expected[i]) > kTolerance) {
+      *result_listener << "component " << i << " should be " << expected[i];
+      return false;
+    }
+  }
+
+  return true;
+}
+
+// Transforms a zero axis/angle to a quaternion.
+TEST(Rotation, ZeroAngleAxisToQuaternion) {
+  double axis_angle[3] = { 0, 0, 0 };
+  double quaternion[4];
+  double expected[4] = { 1, 0, 0, 0 };
+  AngleAxisToQuaternion(axis_angle, quaternion);
+  EXPECT_THAT(quaternion, IsNormalizedQuaternion());
+  EXPECT_THAT(quaternion, IsNearQuaternion(expected));
+}
+
+// Test that exact conversion works for small angles.
+TEST(Rotation, SmallAngleAxisToQuaternion) {
+  // Small, finite value to test.
+  double theta = 1.0e-2;
+  double axis_angle[3] = { theta, 0, 0 };
+  double quaternion[4];
+  double expected[4] = { cos(theta/2), sin(theta/2.0), 0, 0 };
+  AngleAxisToQuaternion(axis_angle, quaternion);
+  EXPECT_THAT(quaternion, IsNormalizedQuaternion());
+  EXPECT_THAT(quaternion, IsNearQuaternion(expected));
+}
+
+// Test that approximate conversion works for very small angles.
+TEST(Rotation, TinyAngleAxisToQuaternion) {
+  // Very small value that could potentially cause underflow.
+  double theta = pow(numeric_limits<double>::min(), 0.75);
+  double axis_angle[3] = { theta, 0, 0 };
+  double quaternion[4];
+  double expected[4] = { cos(theta/2), sin(theta/2.0), 0, 0 };
+  AngleAxisToQuaternion(axis_angle, quaternion);
+  EXPECT_THAT(quaternion, IsNormalizedQuaternion());
+  EXPECT_THAT(quaternion, IsNearQuaternion(expected));
+}
+
+// Transforms a rotation by pi/2 around X to a quaternion.
+TEST(Rotation, XRotationToQuaternion) {
+  double axis_angle[3] = { kPi / 2, 0, 0 };
+  double quaternion[4];
+  double expected[4] = { kHalfSqrt2, kHalfSqrt2, 0, 0 };
+  AngleAxisToQuaternion(axis_angle, quaternion);
+  EXPECT_THAT(quaternion, IsNormalizedQuaternion());
+  EXPECT_THAT(quaternion, IsNearQuaternion(expected));
+}
+
+// Transforms a unit quaternion to an axis angle.
+TEST(Rotation, UnitQuaternionToAngleAxis) {
+  double quaternion[4] = { 1, 0, 0, 0 };
+  double axis_angle[3];
+  double expected[3] = { 0, 0, 0 };
+  QuaternionToAngleAxis(quaternion, axis_angle);
+  EXPECT_THAT(axis_angle, IsNearAngleAxis(expected));
+}
+
+// Transforms a quaternion that rotates by pi about the Y axis to an axis angle.
+TEST(Rotation, YRotationQuaternionToAngleAxis) {
+  double quaternion[4] = { 0, 0, 1, 0 };
+  double axis_angle[3];
+  double expected[3] = { 0, kPi, 0 };
+  QuaternionToAngleAxis(quaternion, axis_angle);
+  EXPECT_THAT(axis_angle, IsNearAngleAxis(expected));
+}
+
+// Transforms a quaternion that rotates by pi/3 about the Z axis to an axis
+// angle.
+TEST(Rotation, ZRotationQuaternionToAngleAxis) {
+  double quaternion[4] = { sqrt(3) / 2, 0, 0, 0.5 };
+  double axis_angle[3];
+  double expected[3] = { 0, 0, kPi / 3 };
+  QuaternionToAngleAxis(quaternion, axis_angle);
+  EXPECT_THAT(axis_angle, IsNearAngleAxis(expected));
+}
+
+// Test that exact conversion works for small angles.
+TEST(Rotation, SmallQuaternionToAngleAxis) {
+  // Small, finite value to test.
+  double theta = 1.0e-2;
+  double quaternion[4] = { cos(theta/2), sin(theta/2.0), 0, 0 };
+  double axis_angle[3];
+  double expected[3] = { theta, 0, 0 };
+  QuaternionToAngleAxis(quaternion, axis_angle);
+  EXPECT_THAT(axis_angle, IsNearAngleAxis(expected));
+}
+
+// Test that approximate conversion works for very small angles.
+TEST(Rotation, TinyQuaternionToAngleAxis) {
+  // Very small value that could potentially cause underflow.
+  double theta = pow(numeric_limits<double>::min(), 0.75);
+  double quaternion[4] = { cos(theta/2), sin(theta/2.0), 0, 0 };
+  double axis_angle[3];
+  double expected[3] = { theta, 0, 0 };
+  QuaternionToAngleAxis(quaternion, axis_angle);
+  EXPECT_THAT(axis_angle, IsNearAngleAxis(expected));
+}
+
+TEST(Rotation, QuaternionToAngleAxisAngleIsLessThanPi) {
+  double quaternion[4];
+  double angle_axis[3];
+
+  const double half_theta = 0.75 * kPi;
+
+  quaternion[0] = cos(half_theta);
+  quaternion[1] = 1.0 * sin(half_theta);
+  quaternion[2] = 0.0;
+  quaternion[3] = 0.0;
+  QuaternionToAngleAxis(quaternion, angle_axis);
+  const double angle = sqrt(angle_axis[0] * angle_axis[0] +
+                            angle_axis[1] * angle_axis[1] +
+                            angle_axis[2] * angle_axis[2]);
+  EXPECT_LE(angle, kPi);
+}
+
+static const int kNumTrials = 10000;
+
+// Takes a bunch of random axis/angle values, converts them to quaternions,
+// and back again.
+TEST(Rotation, AngleAxisToQuaternionAndBack) {
+  srand(5);
+  for (int i = 0; i < kNumTrials; i++) {
+    double axis_angle[3];
+    // Make an axis by choosing three random numbers in [-1, 1) and
+    // normalizing.
+    double norm = 0;
+    for (int i = 0; i < 3; i++) {
+      axis_angle[i] = RandDouble() * 2 - 1;
+      norm += axis_angle[i] * axis_angle[i];
+    }
+    norm = sqrt(norm);
+
+    // Angle in [-pi, pi).
+    double theta = kPi * 2 * RandDouble() - kPi;
+    for (int i = 0; i < 3; i++) {
+      axis_angle[i] = axis_angle[i] * theta / norm;
+    }
+
+    double quaternion[4];
+    double round_trip[3];
+    // We use ASSERTs here because if there's one failure, there are
+    // probably many and spewing a million failures doesn't make anyone's
+    // day.
+    AngleAxisToQuaternion(axis_angle, quaternion);
+    ASSERT_THAT(quaternion, IsNormalizedQuaternion());
+    QuaternionToAngleAxis(quaternion, round_trip);
+    ASSERT_THAT(round_trip, IsNearAngleAxis(axis_angle));
+  }
+}
+
+// Takes a bunch of random quaternions, converts them to axis/angle,
+// and back again.
+TEST(Rotation, QuaternionToAngleAxisAndBack) {
+  srand(5);
+  for (int i = 0; i < kNumTrials; i++) {
+    double quaternion[4];
+    // Choose four random numbers in [-1, 1) and normalize.
+    double norm = 0;
+    for (int i = 0; i < 4; i++) {
+      quaternion[i] = RandDouble() * 2 - 1;
+      norm += quaternion[i] * quaternion[i];
+    }
+    norm = sqrt(norm);
+
+    for (int i = 0; i < 4; i++) {
+      quaternion[i] = quaternion[i] / norm;
+    }
+
+    double axis_angle[3];
+    double round_trip[4];
+    QuaternionToAngleAxis(quaternion, axis_angle);
+    AngleAxisToQuaternion(axis_angle, round_trip);
+    ASSERT_THAT(round_trip, IsNormalizedQuaternion());
+    ASSERT_THAT(round_trip, IsNearQuaternion(quaternion));
+  }
+}
+
+// Transforms a zero axis/angle to a rotation matrix.
+TEST(Rotation, ZeroAngleAxisToRotationMatrix) {
+  double axis_angle[3] = { 0, 0, 0 };
+  double matrix[9];
+  double expected[9] = { 1, 0, 0, 0, 1, 0, 0, 0, 1 };
+  AngleAxisToRotationMatrix(axis_angle, matrix);
+  EXPECT_THAT(matrix, IsOrthonormal());
+  EXPECT_THAT(matrix, IsNear3x3Matrix(expected));
+}
+
+TEST(Rotation, NearZeroAngleAxisToRotationMatrix) {
+  double axis_angle[3] = { 1e-24, 2e-24, 3e-24 };
+  double matrix[9];
+  double expected[9] = { 1, 0, 0, 0, 1, 0, 0, 0, 1 };
+  AngleAxisToRotationMatrix(axis_angle, matrix);
+  EXPECT_THAT(matrix, IsOrthonormal());
+  EXPECT_THAT(matrix, IsNear3x3Matrix(expected));
+}
+
+// Transforms a rotation by pi/2 around X to a rotation matrix and back.
+TEST(Rotation, XRotationToRotationMatrix) {
+  double axis_angle[3] = { kPi / 2, 0, 0 };
+  double matrix[9];
+  // The rotation matrices are stored column-major.
+  double expected[9] = { 1, 0, 0, 0, 0, 1, 0, -1, 0 };
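+  // Read as a matrix (rows on screen), this is the standard Rx(pi/2):
+  //   [ 1  0  0 ]
+  //   [ 0  0 -1 ]
+  //   [ 0  1  0 ]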
+  AngleAxisToRotationMatrix(axis_angle, matrix);
+  EXPECT_THAT(matrix, IsOrthonormal());
+  EXPECT_THAT(matrix, IsNear3x3Matrix(expected));
+  double round_trip[3];
+  RotationMatrixToAngleAxis(matrix, round_trip);
+  EXPECT_THAT(round_trip, IsNearAngleAxis(axis_angle));
+}
+
+// Transforms an axis angle that rotates by pi about the Y axis to a
+// rotation matrix and back.
+TEST(Rotation, YRotationToRotationMatrix) {
+  double axis_angle[3] = { 0, kPi, 0 };
+  double matrix[9];
+  double expected[9] = { -1, 0, 0, 0, 1, 0, 0, 0, -1 };
+  AngleAxisToRotationMatrix(axis_angle, matrix);
+  EXPECT_THAT(matrix, IsOrthonormal());
+  EXPECT_THAT(matrix, IsNear3x3Matrix(expected));
+
+  double round_trip[3];
+  RotationMatrixToAngleAxis(matrix, round_trip);
+  EXPECT_THAT(round_trip, IsNearAngleAxis(axis_angle));
+}
+
+TEST(Rotation, NearPiAngleAxisRoundTrip) {
+  double in_axis_angle[3];
+  double matrix[9];
+  double out_axis_angle[3];
+
+  srand(5);
+  for (int i = 0; i < kNumTrials; i++) {
+    // Make an axis by choosing three random numbers in [-1, 1) and
+    // normalizing.
+    double norm = 0;
+    for (int i = 0; i < 3; i++) {
+      in_axis_angle[i] = RandDouble() * 2 - 1;
+      norm += in_axis_angle[i] * in_axis_angle[i];
+    }
+    norm = sqrt(norm);
+
+    // Angle in [pi - kMaxSmallAngle, pi).
+    const double kMaxSmallAngle = 1e-8;
+    double theta = kPi - kMaxSmallAngle * RandDouble();
+
+    for (int i = 0; i < 3; i++) {
+      in_axis_angle[i] *= (theta / norm);
+    }
+    AngleAxisToRotationMatrix(in_axis_angle, matrix);
+    RotationMatrixToAngleAxis(matrix, out_axis_angle);
+    EXPECT_THAT(in_axis_angle, IsNearAngleAxis(out_axis_angle));
+  }
+}
+
+TEST(Rotation, AtPiAngleAxisRoundTrip) {
+  // A rotation of kPi about the X axis.
+  static const double kMatrix[3][3] = {
+    {1.0,  0.0,  0.0},
+    {0.0,  -1.0,  0.0},
+    {0.0,  0.0,  -1.0}
+  };
+
+  double in_matrix[9];
+  // Fill it from kMatrix in col-major order.
+  for (int j = 0, k = 0; j < 3; ++j) {
+     for (int i = 0; i < 3; ++i, ++k) {
+       in_matrix[k] = kMatrix[i][j];
+     }
+  }
+
+  const double expected_axis_angle[3] = { kPi, 0, 0 };
+
+  double out_matrix[9];
+  double axis_angle[3];
+  RotationMatrixToAngleAxis(in_matrix, axis_angle);
+  AngleAxisToRotationMatrix(axis_angle, out_matrix);
+
+  LOG(INFO) << "AngleAxis = " << axis_angle[0] << " " << axis_angle[1]
+            << " " << axis_angle[2];
+  LOG(INFO) << "Expected AngleAxis = " << kPi << " 0 0";
+  double out_rowmajor[3][3];
+  for (int j = 0, k = 0; j < 3; ++j) {
+    for (int i = 0; i < 3; ++i, ++k) {
+      out_rowmajor[i][j] = out_matrix[k];
+    }
+  }
+  LOG(INFO) << "Rotation:";
+  LOG(INFO) << "EXPECTED        |        ACTUAL";
+  for (int i = 0; i < 3; ++i) {
+    string line;
+    for (int j = 0; j < 3; ++j) {
+      StringAppendF(&line, "%g ", kMatrix[i][j]);
+    }
+    line += "         |        ";
+    for (int j = 0; j < 3; ++j) {
+      StringAppendF(&line, "%g ", out_rowmajor[i][j]);
+    }
+    LOG(INFO) << line;
+  }
+
+  EXPECT_THAT(axis_angle, IsNearAngleAxis(expected_axis_angle));
+  EXPECT_THAT(out_matrix, IsNear3x3Matrix(in_matrix));
+}
+
+// Transforms an axis angle that rotates by pi/3 about the Z axis to a
+// rotation matrix.
+TEST(Rotation, ZRotationToRotationMatrix) {
+  double axis_angle[3] =  { 0, 0, kPi / 3 };
+  double matrix[9];
+  // This is laid-out row-major on the screen but is actually stored
+  // column-major.
+  double expected[9] = { 0.5, sqrt(3) / 2, 0,   // Column 1
+                         -sqrt(3) / 2, 0.5, 0,  // Column 2
+                         0, 0, 1 };             // Column 3
+  AngleAxisToRotationMatrix(axis_angle, matrix);
+  EXPECT_THAT(matrix, IsOrthonormal());
+  EXPECT_THAT(matrix, IsNear3x3Matrix(expected));
+  double round_trip[3];
+  RotationMatrixToAngleAxis(matrix, round_trip);
+  EXPECT_THAT(round_trip, IsNearAngleAxis(axis_angle));
+}
+
+// Takes a bunch of random axis/angle values, converts them to rotation
+// matrices, and back again.
+TEST(Rotation, AngleAxisToRotationMatrixAndBack) {
+  srand(5);
+  for (int i = 0; i < kNumTrials; i++) {
+    double axis_angle[3];
+    // Make an axis by choosing three random numbers in [-1, 1) and
+    // normalizing.
+    double norm = 0;
+    for (int i = 0; i < 3; i++) {
+      axis_angle[i] = RandDouble() * 2 - 1;
+      norm += axis_angle[i] * axis_angle[i];
+    }
+    norm = sqrt(norm);
+
+    // Angle in [-pi, pi).
+    double theta = kPi * 2 * RandDouble() - kPi;
+    for (int i = 0; i < 3; i++) {
+      axis_angle[i] = axis_angle[i] * theta / norm;
+    }
+
+    double matrix[9];
+    double round_trip[3];
+    AngleAxisToRotationMatrix(axis_angle, matrix);
+    ASSERT_THAT(matrix, IsOrthonormal());
+    RotationMatrixToAngleAxis(matrix, round_trip);
+
+    for (int i = 0; i < 3; ++i) {
+      EXPECT_NEAR(round_trip[i], axis_angle[i], kLooseTolerance);
+    }
+  }
+}
+
+// Takes a bunch of random axis/angle values near zero, converts them
+// to rotation matrices, and back again.
+TEST(Rotation, AngleAxisToRotationMatrixAndBackNearZero) {
+  srand(5);
+  for (int i = 0; i < kNumTrials; i++) {
+    double axis_angle[3];
+    // Make an axis by choosing three random numbers in [-1, 1) and
+    // normalizing.
+    double norm = 0;
+    for (int i = 0; i < 3; i++) {
+      axis_angle[i] = RandDouble() * 2 - 1;
+      norm += axis_angle[i] * axis_angle[i];
+    }
+    norm = sqrt(norm);
+
+    // Tiny theta.
+    double theta = 1e-16 * (kPi * 2 * RandDouble() - kPi);
+    for (int i = 0; i < 3; i++) {
+      axis_angle[i] = axis_angle[i] * theta / norm;
+    }
+
+    double matrix[9];
+    double round_trip[3];
+    AngleAxisToRotationMatrix(axis_angle, matrix);
+    ASSERT_THAT(matrix, IsOrthonormal());
+    RotationMatrixToAngleAxis(matrix, round_trip);
+
+    for (int i = 0; i < 3; ++i) {
+      EXPECT_NEAR(round_trip[i], axis_angle[i],
+                  numeric_limits<double>::epsilon());
+    }
+  }
+}
+
+
+// Transposes a 3x3 matrix.
+static void Transpose3x3(double m[9]) {
+  swap(m[1], m[3]);
+  swap(m[2], m[6]);
+  swap(m[5], m[7]);
+}
+
+// Convert Euler angles from radians to degrees.
+static void ToDegrees(double euler_angles[3]) {
+  for (int i = 0; i < 3; ++i) {
+    euler_angles[i] *= 180.0 / kPi;
+  }
+}
+
+// Compare the 3x3 rotation matrices produced by the axis-angle
+// rotation 'aa' and the Euler angle rotation 'ea' (in radians).
+static void CompareEulerToAngleAxis(double aa[3], double ea[3]) {
+  double aa_matrix[9];
+  AngleAxisToRotationMatrix(aa, aa_matrix);
+  Transpose3x3(aa_matrix);  // Column to row major order.
+
+  double ea_matrix[9];
+  ToDegrees(ea);  // Radians to degrees.
+  const int kRowStride = 3;
+  EulerAnglesToRotationMatrix(ea, kRowStride, ea_matrix);
+
+  EXPECT_THAT(aa_matrix, IsOrthonormal());
+  EXPECT_THAT(ea_matrix, IsOrthonormal());
+  EXPECT_THAT(ea_matrix, IsNear3x3Matrix(aa_matrix));
+}
+
+// Test with rotation axis along the x/y/z axes.
+// Also test zero rotation.
+TEST(EulerAnglesToRotationMatrix, OnAxis) {
+  int n_tests = 0;
+  for (double x = -1.0; x <= 1.0; x += 1.0) {
+    for (double y = -1.0; y <= 1.0; y += 1.0) {
+      for (double z = -1.0; z <= 1.0; z += 1.0) {
+        if ((x != 0) + (y != 0) + (z != 0) > 1)
+          continue;
+        double axis_angle[3] = {x, y, z};
+        double euler_angles[3] = {x, y, z};
+        CompareEulerToAngleAxis(axis_angle, euler_angles);
+        ++n_tests;
+      }
+    }
+  }
+  CHECK_EQ(7, n_tests);
+}
+
+// Test that a random rotation produces an orthonormal rotation
+// matrix.
+TEST(EulerAnglesToRotationMatrix, IsOrthonormal) {
+  srand(5);
+  for (int trial = 0; trial < kNumTrials; ++trial) {
+    double euler_angles_degrees[3];
+    for (int i = 0; i < 3; ++i) {
+      euler_angles_degrees[i] = RandDouble() * 360.0 - 180.0;
+    }
+    double rotation_matrix[9];
+    EulerAnglesToRotationMatrix(euler_angles_degrees, 3, rotation_matrix);
+    EXPECT_THAT(rotation_matrix, IsOrthonormal());
+  }
+}
+
+// Tests using Jets for specific behavior involving auto differentiation
+// near singularity points.
+
+typedef Jet<double, 3> J3;
+typedef Jet<double, 4> J4;
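+
+// A Jet<double, N> is a dual number carrying a value .a and N partial
+// derivatives .v. A Jet constructed as J3(theta, 0) has value theta and
+// derivative vector equal to the 0-th unit vector, i.e. it represents an
+// input that is differentiated with respect to the first independent
+// variable. MakeJ3/MakeJ4 below build Jets with arbitrary derivative
+// vectors for use as expected values.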
+
+J3 MakeJ3(double a, double v0, double v1, double v2) {
+  J3 j;
+  j.a = a;
+  j.v[0] = v0;
+  j.v[1] = v1;
+  j.v[2] = v2;
+  return j;
+}
+
+J4 MakeJ4(double a, double v0, double v1, double v2, double v3) {
+  J4 j;
+  j.a = a;
+  j.v[0] = v0;
+  j.v[1] = v1;
+  j.v[2] = v2;
+  j.v[3] = v3;
+  return j;
+}
+
+bool IsClose(double x, double y) {
+  EXPECT_FALSE(IsNaN(x));
+  EXPECT_FALSE(IsNaN(y));
+  return internal::IsClose(x, y, kTolerance, NULL, NULL);
+}
+
+template <int N>
+bool IsClose(const Jet<double, N> &x, const Jet<double, N> &y) {
+  if (!IsClose(x.a, y.a)) {
+    return false;
+  }
+  for (int i = 0; i < N; i++) {
+    if (!IsClose(x.v[i], y.v[i])) {
+      return false;
+    }
+  }
+  return true;
+}
+
+template <int M, int N>
+void ExpectJetArraysClose(const Jet<double, N> *x, const Jet<double, N> *y) {
+  for (int i = 0; i < M; i++) {
+    if (!IsClose(x[i], y[i])) {
+      LOG(ERROR) << "Jet " << i << "/" << M << " not equal";
+      LOG(ERROR) << "x[" << i << "]: " << x[i];
+      LOG(ERROR) << "y[" << i << "]: " << y[i];
+      Jet<double, N> d, zero;
+      d.a = y[i].a - x[i].a;
+      for (int j = 0; j < N; j++) {
+        d.v[j] = y[i].v[j] - x[i].v[j];
+      }
+      LOG(ERROR) << "diff: " << d;
+      EXPECT_TRUE(IsClose(x[i], y[i]));
+    }
+  }
+}
+
+// Log-10 of a value well below machine precision.
+static const int kSmallTinyCutoff =
+    static_cast<int>(2 * log(numeric_limits<double>::epsilon())/log(10.0));
+
+// Log-10 of a value just above the smallest positive normalized double.
+static const int kTinyZeroLimit   =
+    static_cast<int>(1 + log(numeric_limits<double>::min())/log(10.0));
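+
+// For IEEE doubles these evaluate to roughly kSmallTinyCutoff == -31 and
+// kTinyZeroLimit == -306, so the "small" tests below sweep theta from 1e-2
+// down to about 1e-31, and the "tiny" tests continue from there down to
+// about 1e-306, just above the underflow threshold.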
+
+// Test that exact conversion works for small angles when jets are used.
+TEST(Rotation, SmallAngleAxisToQuaternionForJets) {
+  // Examine small x rotations that are still large enough
+  // to be well within the range represented by doubles.
+  for (int i = -2; i >= kSmallTinyCutoff; i--) {
+    double theta = pow(10.0, i);
+    J3 axis_angle[3] = { J3(theta, 0), J3(0, 1), J3(0, 2) };
+    J3 quaternion[4];
+    J3 expected[4] = {
+        MakeJ3(cos(theta/2), -sin(theta/2)/2, 0, 0),
+        MakeJ3(sin(theta/2), cos(theta/2)/2, 0, 0),
+        MakeJ3(0, 0, sin(theta/2)/theta, 0),
+        MakeJ3(0, 0, 0, sin(theta/2)/theta),
+    };
+    AngleAxisToQuaternion(axis_angle, quaternion);
+    ExpectJetArraysClose<4, 3>(quaternion, expected);
+  }
+}
+
+
+// Test that conversion works for very small angles when jets are used.
+TEST(Rotation, TinyAngleAxisToQuaternionForJets) {
+  // Examine tiny x rotations that extend all the way to where
+  // underflow occurs.
+  for (int i = kSmallTinyCutoff; i >= kTinyZeroLimit; i--) {
+    double theta = pow(10.0, i);
+    J3 axis_angle[3] = { J3(theta, 0), J3(0, 1), J3(0, 2) };
+    J3 quaternion[4];
+    // To avoid loss of precision in the test itself,
+    // a finite expansion is used here, which will
+    // be exact up to machine precision for the test values used.
+    J3 expected[4] = {
+        MakeJ3(1.0, 0, 0, 0),
+        MakeJ3(0, 0.5, 0, 0),
+        MakeJ3(0, 0, 0.5, 0),
+        MakeJ3(0, 0, 0, 0.5),
+    };
+    AngleAxisToQuaternion(axis_angle, quaternion);
+    ExpectJetArraysClose<4, 3>(quaternion, expected);
+  }
+}
+
+// Test that derivatives are correct for zero rotation.
+TEST(Rotation, ZeroAngleAxisToQuaternionForJets) {
+  J3 axis_angle[3] = { J3(0, 0), J3(0, 1), J3(0, 2) };
+  J3 quaternion[4];
+  J3 expected[4] = {
+      MakeJ3(1.0, 0, 0, 0),
+      MakeJ3(0, 0.5, 0, 0),
+      MakeJ3(0, 0, 0.5, 0),
+      MakeJ3(0, 0, 0, 0.5),
+  };
+  AngleAxisToQuaternion(axis_angle, quaternion);
+  ExpectJetArraysClose<4, 3>(quaternion, expected);
+}
+
+// Test that exact conversion works for small angles.
+TEST(Rotation, SmallQuaternionToAngleAxisForJets) {
+  // Examine small x rotations that are still large enough
+  // to be well within the range represented by doubles.
+  for (int i = -2; i >= kSmallTinyCutoff; i--) {
+    double theta = pow(10.0, i);
+    double s = sin(theta);
+    double c = cos(theta);
+    J4 quaternion[4] = { J4(c, 0), J4(s, 1), J4(0, 2), J4(0, 3) };
+    J4 axis_angle[3];
+    J4 expected[3] = {
+        MakeJ4(2*theta, -2*s, 2*c,  0,         0),
+        MakeJ4(0,        0,   0,    2*theta/s, 0),
+        MakeJ4(0,        0,   0,    0,         2*theta/s),
+    };
+    QuaternionToAngleAxis(quaternion, axis_angle);
+    ExpectJetArraysClose<3, 4>(axis_angle, expected);
+  }
+}
+
+// Test that conversion works for very small angles.
+TEST(Rotation, TinyQuaternionToAngleAxisForJets) {
+  // Examine tiny x rotations that extend all the way to where
+  // underflow occurs.
+  for (int i = kSmallTinyCutoff; i >= kTinyZeroLimit; i--) {
+    double theta = pow(10.0, i);
+    double s = sin(theta);
+    double c = cos(theta);
+    J4 quaternion[4] = { J4(c, 0), J4(s, 1), J4(0, 2), J4(0, 3) };
+    J4 axis_angle[3];
+    // To avoid loss of precision in the test itself,
+    // a finite expansion is used here, which will
+    // be exact up to machine precision for the test values used.
+    J4 expected[3] = {
+        MakeJ4(2*theta, -2*s, 2.0, 0,   0),
+        MakeJ4(0,        0,   0,   2.0, 0),
+        MakeJ4(0,        0,   0,   0,   2.0),
+    };
+    QuaternionToAngleAxis(quaternion, axis_angle);
+    ExpectJetArraysClose<3, 4>(axis_angle, expected);
+  }
+}
+
+// Test that conversion works for no rotation.
+TEST(Rotation, ZeroQuaternionToAngleAxisForJets) {
+  J4 quaternion[4] = { J4(1, 0), J4(0, 1), J4(0, 2), J4(0, 3) };
+  J4 axis_angle[3];
+  J4 expected[3] = {
+      MakeJ4(0, 0, 2.0, 0, 0),
+      MakeJ4(0, 0, 0, 2.0, 0),
+      MakeJ4(0, 0, 0, 0, 2.0),
+  };
+  QuaternionToAngleAxis(quaternion, axis_angle);
+  ExpectJetArraysClose<3, 4>(axis_angle, expected);
+}
+
+TEST(Quaternion, RotatePointGivesSameAnswerAsRotationByMatrixCanned) {
+  // Canned data generated in octave.
+  double const q[4] = {
+    +0.1956830471754074,
+    -0.0150618562474847,
+    +0.7634572982788086,
+    -0.3019454777240753,
+  };
+  double const Q[3][3] = {  // Scaled rotation matrix.
+    { -0.6355194033477252,  0.0951730541682254,  0.3078870197911186 },
+    { -0.1411693904792992,  0.5297609702153905, -0.4551502574482019 },
+    { -0.2896955822708862, -0.4669396571547050, -0.4536309793389248 },
+  };
+  double const R[3][3] = {  // With unit rows and columns.
+    { -0.8918859164053080,  0.1335655625725649,  0.4320876677394745 },
+    { -0.1981166751680096,  0.7434648665444399, -0.6387564287225856 },
+    { -0.4065578619806013, -0.6553016349046693, -0.6366242786393164 },
+  };
+
+  // Compute R from q and compare to known answer.
+  double Rq[3][3];
+  QuaternionToScaledRotation<double>(q, Rq[0]);
+  ExpectArraysClose(9, Q[0], Rq[0], kTolerance);
+
+  // Now do the same but compute R with normalization.
+  QuaternionToRotation<double>(q, Rq[0]);
+  ExpectArraysClose(9, R[0], Rq[0], kTolerance);
+}
+
+
+TEST(Quaternion, RotatePointGivesSameAnswerAsRotationByMatrix) {
+  // Rotation defined by a unit quaternion.
+  double const q[4] = {
+    0.2318160216097109,
+    -0.0178430356832060,
+    0.9044300776717159,
+    -0.3576998641394597,
+  };
+  double const p[3] = {
+    +0.11,
+    -13.15,
+    1.17,
+  };
+
+  double R[3 * 3];
+  QuaternionToRotation(q, R);
+
+  double result1[3];
+  UnitQuaternionRotatePoint(q, p, result1);
+
+  double result2[3];
+  VectorRef(result2, 3) = ConstMatrixRef(R, 3, 3) * ConstVectorRef(p, 3);
+  ExpectArraysClose(3, result1, result2, kTolerance);
+}
+
+
+// Verify that (a * b) * c == a * (b * c).
+TEST(Quaternion, MultiplicationIsAssociative) {
+  double a[4];
+  double b[4];
+  double c[4];
+  for (int i = 0; i < 4; ++i) {
+    a[i] = 2 * RandDouble() - 1;
+    b[i] = 2 * RandDouble() - 1;
+    c[i] = 2 * RandDouble() - 1;
+  }
+
+  double ab[4];
+  double ab_c[4];
+  QuaternionProduct(a, b, ab);
+  QuaternionProduct(ab, c, ab_c);
+
+  double bc[4];
+  double a_bc[4];
+  QuaternionProduct(b, c, bc);
+  QuaternionProduct(a, bc, a_bc);
+
+  ASSERT_NEAR(ab_c[0], a_bc[0], kTolerance);
+  ASSERT_NEAR(ab_c[1], a_bc[1], kTolerance);
+  ASSERT_NEAR(ab_c[2], a_bc[2], kTolerance);
+  ASSERT_NEAR(ab_c[3], a_bc[3], kTolerance);
+}
+
+
+TEST(AngleAxis, RotatePointGivesSameAnswerAsRotationMatrix) {
+  double angle_axis[3];
+  double R[9];
+  double p[3];
+  double angle_axis_rotated_p[3];
+  double rotation_matrix_rotated_p[3];
+
+  for (int i = 0; i < 10000; ++i) {
+    double theta = (2.0 * i * 0.0011 - 1.0) * kPi;
+    for (int j = 0; j < 50; ++j) {
+      double norm2 = 0.0;
+      for (int k = 0; k < 3; ++k) {
+        angle_axis[k] = 2.0 * RandDouble() - 1.0;
+        p[k] = 2.0 * RandDouble() - 1.0;
+        norm2 += angle_axis[k] * angle_axis[k];
+      }
+
+      const double inv_norm = theta / sqrt(norm2);
+      for (int k = 0; k < 3; ++k) {
+        angle_axis[k] *= inv_norm;
+      }
+
+      AngleAxisToRotationMatrix(angle_axis, R);
+      rotation_matrix_rotated_p[0] = R[0] * p[0] + R[3] * p[1] + R[6] * p[2];
+      rotation_matrix_rotated_p[1] = R[1] * p[0] + R[4] * p[1] + R[7] * p[2];
+      rotation_matrix_rotated_p[2] = R[2] * p[0] + R[5] * p[1] + R[8] * p[2];
+
+      AngleAxisRotatePoint(angle_axis, p, angle_axis_rotated_p);
+      for (int k = 0; k < 3; ++k) {
+        EXPECT_NEAR(rotation_matrix_rotated_p[k],
+                    angle_axis_rotated_p[k],
+                    kTolerance) << "p: " << p[0]
+                                << " " << p[1]
+                                << " " << p[2]
+                                << " angle_axis: " << angle_axis[0]
+                                << " " << angle_axis[1]
+                                << " " << angle_axis[2];
+      }
+    }
+  }
+}
+
+TEST(AngleAxis, NearZeroRotatePointGivesSameAnswerAsRotationMatrix) {
+  double angle_axis[3];
+  double R[9];
+  double p[3];
+  double angle_axis_rotated_p[3];
+  double rotation_matrix_rotated_p[3];
+
+  for (int i = 0; i < 10000; ++i) {
+    double norm2 = 0.0;
+    for (int k = 0; k < 3; ++k) {
+      angle_axis[k] = 2.0 * RandDouble() - 1.0;
+      p[k] = 2.0 * RandDouble() - 1.0;
+      norm2 += angle_axis[k] * angle_axis[k];
+    }
+
+    double theta = (2.0 * i * 0.0001  - 1.0) * 1e-16;
+    const double inv_norm = theta / sqrt(norm2);
+    for (int k = 0; k < 3; ++k) {
+      angle_axis[k] *= inv_norm;
+    }
+
+    AngleAxisToRotationMatrix(angle_axis, R);
+    rotation_matrix_rotated_p[0] = R[0] * p[0] + R[3] * p[1] + R[6] * p[2];
+    rotation_matrix_rotated_p[1] = R[1] * p[0] + R[4] * p[1] + R[7] * p[2];
+    rotation_matrix_rotated_p[2] = R[2] * p[0] + R[5] * p[1] + R[8] * p[2];
+
+    AngleAxisRotatePoint(angle_axis, p, angle_axis_rotated_p);
+    for (int k = 0; k < 3; ++k) {
+      EXPECT_NEAR(rotation_matrix_rotated_p[k],
+                  angle_axis_rotated_p[k],
+                  kTolerance) << "p: " << p[0]
+                              << " " << p[1]
+                              << " " << p[2]
+                              << " angle_axis: " << angle_axis[0]
+                              << " " << angle_axis[1]
+                              << " " << angle_axis[2];
+    }
+  }
+}
+
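+
+// MatrixAdapter<T, row_stride, col_stride> wraps a raw pointer so that
+// M(i, j) refers to ptr[row_stride * i + col_stride * j]; RowMajorAdapter3x3
+// and ColumnMajorAdapter3x3 are convenience constructors for the 3x3 case.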
+TEST(MatrixAdapter, RowMajor3x3ReturnTypeAndAccessIsCorrect) {
+  double array[9] = { 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0 };
+  const float const_array[9] =
+      { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f };
+  MatrixAdapter<double, 3, 1> A = RowMajorAdapter3x3(array);
+  MatrixAdapter<const float, 3, 1> B = RowMajorAdapter3x3(const_array);
+
+  for (int i = 0; i < 3; ++i) {
+    for (int j = 0; j < 3; ++j) {
+      // The values are integers from 1 to 9, so equality tests are appropriate
+      // even for float and double values.
+      EXPECT_EQ(A(i, j), array[3*i+j]);
+      EXPECT_EQ(B(i, j), const_array[3*i+j]);
+    }
+  }
+}
+
+TEST(MatrixAdapter, ColumnMajor3x3ReturnTypeAndAccessIsCorrect) {
+  double array[9] = { 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0 };
+  const float const_array[9] =
+      { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f };
+  MatrixAdapter<double, 1, 3> A = ColumnMajorAdapter3x3(array);
+  MatrixAdapter<const float, 1, 3> B = ColumnMajorAdapter3x3(const_array);
+
+  for (int i = 0; i < 3; ++i) {
+    for (int j = 0; j < 3; ++j) {
+      // The values are integers from 1 to 9, so equality tests are
+      // appropriate even for float and double values.
+      EXPECT_EQ(A(i, j), array[3*j+i]);
+      EXPECT_EQ(B(i, j), const_array[3*j+i]);
+    }
+  }
+}
+
+TEST(MatrixAdapter, RowMajor2x4IsCorrect) {
+  const int expected[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
+  int array[8];
+  MatrixAdapter<int, 4, 1> M(array);
+  M(0, 0) = 1; M(0, 1) = 2; M(0, 2) = 3; M(0, 3) = 4;
+  M(1, 0) = 5; M(1, 1) = 6; M(1, 2) = 7; M(1, 3) = 8;
+  for (int k = 0; k < 8; ++k) {
+    EXPECT_EQ(array[k], expected[k]);
+  }
+}
+
+TEST(MatrixAdapter, ColumnMajor2x4IsCorrect) {
+  const int expected[8] = { 1, 5, 2, 6, 3, 7, 4, 8 };
+  int array[8];
+  MatrixAdapter<int, 1, 2> M(array);
+  M(0, 0) = 1; M(0, 1) = 2; M(0, 2) = 3; M(0, 3) = 4;
+  M(1, 0) = 5; M(1, 1) = 6; M(1, 2) = 7; M(1, 3) = 8;
+  for (int k = 0; k < 8; ++k) {
+    EXPECT_EQ(array[k], expected[k]);
+  }
+}
+
+TEST(RotationMatrixToAngleAxis, NearPiExampleOneFromTobiasStrauss) {
+  // Example from Tobias Strauss
+  const double rotation_matrix[] = {
+    -0.999807135425239,    -0.0128154391194470,   -0.0148814136745799,
+    -0.0128154391194470,   -0.148441438622958,     0.988838158557669,
+    -0.0148814136745799,    0.988838158557669,     0.148248574048196
+  };
+
+  double angle_axis[3];
+  RotationMatrixToAngleAxis(RowMajorAdapter3x3(rotation_matrix), angle_axis);
+  double round_trip[9];
+  AngleAxisToRotationMatrix(angle_axis, RowMajorAdapter3x3(round_trip));
+  EXPECT_THAT(rotation_matrix, IsNear3x3Matrix(round_trip));
+}
+
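+// Builds an angle-axis vector whose direction is given by the spherical
+// coordinates (theta = azimuth, phi = polar angle) and whose magnitude is
+// |angle|, converts it to a rotation matrix and back, and checks that the
+// original angle-axis vector is recovered.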
+void CheckRotationMatrixToAngleAxisRoundTrip(const double theta,
+                                             const double phi,
+                                             const double angle) {
+  double angle_axis[3];
+  angle_axis[0] = angle * sin(phi) * cos(theta);
+  angle_axis[1] = angle * sin(phi) * sin(theta);
+  angle_axis[2] = angle * cos(phi);
+
+  double rotation_matrix[9];
+  AngleAxisToRotationMatrix(angle_axis, rotation_matrix);
+
+  double angle_axis_round_trip[3];
+  RotationMatrixToAngleAxis(rotation_matrix, angle_axis_round_trip);
+  EXPECT_THAT(angle_axis_round_trip, IsNearAngleAxis(angle_axis));
+}
+
+TEST(RotationMatrixToAngleAxis, ExhaustiveRoundTrip) {
+  const double kMaxSmallAngle = 1e-8;
+  const int kNumSteps = 1000;
+  for (int i = 0; i < kNumSteps; ++i) {
+    const double theta = static_cast<double>(i) / kNumSteps * 2.0 * kPi;
+    for (int j = 0; j < kNumSteps; ++j) {
+      const double phi = static_cast<double>(j) / kNumSteps * kPi;
+      // Rotations of angle Pi.
+      CheckRotationMatrixToAngleAxisRoundTrip(theta, phi, kPi);
+      // Rotation of angle approximately Pi.
+      CheckRotationMatrixToAngleAxisRoundTrip(
+          theta, phi, kPi - kMaxSmallAngle * RandDouble());
+      // Rotations of angle approximately zero.
+      CheckRotationMatrixToAngleAxisRoundTrip(
+          theta, phi, kMaxSmallAngle * (2.0 * RandDouble() - 1.0));
+    }
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/schur_complement_solver.cc b/internal/ceres/schur_complement_solver.cc
new file mode 100644
index 0000000..19f5b06
--- /dev/null
+++ b/internal/ceres/schur_complement_solver.cc
@@ -0,0 +1,422 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/schur_complement_solver.h"
+
+#include <algorithm>
+#include <ctime>
+#include <memory>
+#include <set>
+#include <vector>
+
+#include "Eigen/Dense"
+#include "Eigen/SparseCore"
+#include "ceres/block_random_access_dense_matrix.h"
+#include "ceres/block_random_access_matrix.h"
+#include "ceres/block_random_access_sparse_matrix.h"
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/block_structure.h"
+#include "ceres/conjugate_gradients_solver.h"
+#include "ceres/detect_structure.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/lapack.h"
+#include "ceres/linear_solver.h"
+#include "ceres/sparse_cholesky.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/types.h"
+#include "ceres/wall_time.h"
+
+namespace ceres {
+namespace internal {
+
+using std::make_pair;
+using std::pair;
+using std::set;
+using std::vector;
+
+namespace {
+
+class BlockRandomAccessSparseMatrixAdapter : public LinearOperator {
+ public:
+  explicit BlockRandomAccessSparseMatrixAdapter(
+      const BlockRandomAccessSparseMatrix& m)
+      : m_(m) {
+  }
+
+  virtual ~BlockRandomAccessSparseMatrixAdapter() {}
+
+  // y = y + Ax;
+  virtual void RightMultiply(const double* x, double* y) const {
+    m_.SymmetricRightMultiply(x, y);
+  }
+
+  // y = y + A'x;
+  virtual void LeftMultiply(const double* x, double* y) const {
+    m_.SymmetricRightMultiply(x, y);
+  }
+
+  virtual int num_rows() const { return m_.num_rows(); }
+  virtual int num_cols() const { return m_.num_rows(); }
+
+ private:
+  const BlockRandomAccessSparseMatrix& m_;
+};
+
+class BlockRandomAccessDiagonalMatrixAdapter : public LinearOperator {
+ public:
+  explicit BlockRandomAccessDiagonalMatrixAdapter(
+      const BlockRandomAccessDiagonalMatrix& m)
+      : m_(m) {
+  }
+
+  virtual ~BlockRandomAccessDiagonalMatrixAdapter() {}
+
+  // y = y + Ax;
+  virtual void RightMultiply(const double* x, double* y) const {
+    m_.RightMultiply(x, y);
+  }
+
+  // y = y + A'x;
+  virtual void LeftMultiply(const double* x, double* y) const {
+    m_.RightMultiply(x, y);
+  }
+
+  virtual int num_rows() const { return m_.num_rows(); }
+  virtual int num_cols() const { return m_.num_rows(); }
+
+ private:
+  const BlockRandomAccessDiagonalMatrix& m_;
+};
+
+} // namespace
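+
+// The adapters above exist because ConjugateGradientsSolver, used in
+// SolveReducedLinearSystemUsingConjugateGradients below, operates on the
+// LinearOperator interface rather than on the block random access matrix
+// types directly. Since both wrapped matrices are symmetric, LeftMultiply
+// forwards to the same multiply as RightMultiply.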
+
+LinearSolver::Summary SchurComplementSolver::SolveImpl(
+    BlockSparseMatrix* A,
+    const double* b,
+    const LinearSolver::PerSolveOptions& per_solve_options,
+    double* x) {
+  EventLogger event_logger("SchurComplementSolver::Solve");
+
+  if (eliminator_.get() == NULL) {
+    InitStorage(A->block_structure());
+    DetectStructure(*A->block_structure(),
+                    options_.elimination_groups[0],
+                    &options_.row_block_size,
+                    &options_.e_block_size,
+                    &options_.f_block_size);
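+    // The detected (row_block_size, e_block_size, f_block_size) triple lets
+    // SchurEliminatorBase::Create pick a fixed-size template specialization
+    // of the eliminator when one is available (e.g. <2, 3, 9> for bundle
+    // adjustment with 2-dimensional residuals, 3-parameter points and
+    // 9-parameter cameras); otherwise the fully dynamic version is used.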
+    eliminator_.reset(SchurEliminatorBase::Create(options_));
+    CHECK(eliminator_ != nullptr);
+    const bool kFullRankETE = true;
+    eliminator_->Init(
+        options_.elimination_groups[0], kFullRankETE, A->block_structure());
+  }
+
+  std::fill(x, x + A->num_cols(), 0.0);
+  event_logger.AddEvent("Setup");
+
+  eliminator_->Eliminate(A, b, per_solve_options.D, lhs_.get(), rhs_.get());
+  event_logger.AddEvent("Eliminate");
+
+  double* reduced_solution = x + A->num_cols() - lhs_->num_cols();
+  const LinearSolver::Summary summary =
+      SolveReducedLinearSystem(per_solve_options, reduced_solution);
+  event_logger.AddEvent("ReducedSolve");
+
+  if (summary.termination_type == LINEAR_SOLVER_SUCCESS) {
+    eliminator_->BackSubstitute(A, b, per_solve_options.D, reduced_solution, x);
+    event_logger.AddEvent("BackSubstitute");
+  }
+
+  return summary;
+}
+
+// Initialize a BlockRandomAccessDenseMatrix to store the Schur
+// complement.
+void DenseSchurComplementSolver::InitStorage(
+    const CompressedRowBlockStructure* bs) {
+  const int num_eliminate_blocks = options().elimination_groups[0];
+  const int num_col_blocks = bs->cols.size();
+
+  vector<int> blocks(num_col_blocks - num_eliminate_blocks, 0);
+  for (int i = num_eliminate_blocks, j = 0;
+       i < num_col_blocks;
+       ++i, ++j) {
+    blocks[j] = bs->cols[i].size;
+  }
+
+  set_lhs(new BlockRandomAccessDenseMatrix(blocks));
+  set_rhs(new double[lhs()->num_rows()]);
+}
+
+// Solve the system Sx = r, assuming that the matrix S is stored in a
+// BlockRandomAccessDenseMatrix. The linear system is solved using
+// Eigen's Cholesky factorization.
+LinearSolver::Summary
+DenseSchurComplementSolver::SolveReducedLinearSystem(
+    const LinearSolver::PerSolveOptions& per_solve_options,
+    double* solution) {
+  LinearSolver::Summary summary;
+  summary.num_iterations = 0;
+  summary.termination_type = LINEAR_SOLVER_SUCCESS;
+  summary.message = "Success.";
+
+  const BlockRandomAccessDenseMatrix* m =
+      down_cast<const BlockRandomAccessDenseMatrix*>(lhs());
+  const int num_rows = m->num_rows();
+
+  // The case where there are no f blocks, and the system is block
+  // diagonal.
+  if (num_rows == 0) {
+    return summary;
+  }
+
+  summary.num_iterations = 1;
+
+  if (options().dense_linear_algebra_library_type == EIGEN) {
+    Eigen::LLT<Matrix, Eigen::Upper> llt =
+        ConstMatrixRef(m->values(), num_rows, num_rows)
+        .selfadjointView<Eigen::Upper>()
+        .llt();
+    if (llt.info() != Eigen::Success) {
+      summary.termination_type = LINEAR_SOLVER_FAILURE;
+      summary.message =
+          "Eigen failure. Unable to perform dense Cholesky factorization.";
+      return summary;
+    }
+
+    VectorRef(solution, num_rows) = llt.solve(ConstVectorRef(rhs(), num_rows));
+  } else {
+    VectorRef(solution, num_rows) = ConstVectorRef(rhs(), num_rows);
+    summary.termination_type =
+        LAPACK::SolveInPlaceUsingCholesky(num_rows,
+                                          m->values(),
+                                          solution,
+                                          &summary.message);
+  }
+
+  return summary;
+}
+
+SparseSchurComplementSolver::SparseSchurComplementSolver(
+    const LinearSolver::Options& options)
+    : SchurComplementSolver(options) {
+  if (options.type != ITERATIVE_SCHUR) {
+    sparse_cholesky_ = SparseCholesky::Create(options);
+  }
+}
+
+SparseSchurComplementSolver::~SparseSchurComplementSolver() {
+}
+
+// Determine the non-zero blocks in the Schur Complement matrix, and
+// initialize a BlockRandomAccessSparseMatrix object.
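+//
+// The block at (i, j) of the Schur complement is marked non-zero only if
+// some e-block chunk (or some trailing row with no e-block) touches both
+// f-block i and f-block j. For example, if a chunk's rows reference
+// f-blocks {2, 5, 7}, the pairs (2,5), (2,7) and (5,7) are added, in
+// addition to the diagonal blocks (i, i), which are always present.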
+void SparseSchurComplementSolver::InitStorage(
+    const CompressedRowBlockStructure* bs) {
+  const int num_eliminate_blocks = options().elimination_groups[0];
+  const int num_col_blocks = bs->cols.size();
+  const int num_row_blocks = bs->rows.size();
+
+  blocks_.resize(num_col_blocks - num_eliminate_blocks, 0);
+  for (int i = num_eliminate_blocks; i < num_col_blocks; ++i) {
+    blocks_[i - num_eliminate_blocks] = bs->cols[i].size;
+  }
+
+  set<pair<int, int>> block_pairs;
+  for (int i = 0; i < blocks_.size(); ++i) {
+    block_pairs.insert(make_pair(i, i));
+  }
+
+  int r = 0;
+  while (r < num_row_blocks) {
+    int e_block_id = bs->rows[r].cells.front().block_id;
+    if (e_block_id >= num_eliminate_blocks) {
+      break;
+    }
+    vector<int> f_blocks;
+
+    // Add rows to the chunk until the first block in a row differs
+    // from the e-block shared by the rows already in the chunk.
+    for (; r < num_row_blocks; ++r) {
+      const CompressedRow& row = bs->rows[r];
+      if (row.cells.front().block_id != e_block_id) {
+        break;
+      }
+
+      // Iterate over the blocks in the row, ignoring the first
+      // block since it is the one to be eliminated.
+      for (int c = 1; c < row.cells.size(); ++c) {
+        const Cell& cell = row.cells[c];
+        f_blocks.push_back(cell.block_id - num_eliminate_blocks);
+      }
+    }
+
+    sort(f_blocks.begin(), f_blocks.end());
+    f_blocks.erase(unique(f_blocks.begin(), f_blocks.end()), f_blocks.end());
+    for (int i = 0; i < f_blocks.size(); ++i) {
+      for (int j = i + 1; j < f_blocks.size(); ++j) {
+        block_pairs.insert(make_pair(f_blocks[i], f_blocks[j]));
+      }
+    }
+  }
+
+  // The remaining rows do not contribute to the chunks and go directly
+  // into the Schur complement via an outer product.
+  for (; r < num_row_blocks; ++r) {
+    const CompressedRow& row = bs->rows[r];
+    CHECK_GE(row.cells.front().block_id, num_eliminate_blocks);
+    for (int i = 0; i < row.cells.size(); ++i) {
+      int r_block1_id = row.cells[i].block_id - num_eliminate_blocks;
+      for (int j = 0; j < row.cells.size(); ++j) {
+        int r_block2_id = row.cells[j].block_id - num_eliminate_blocks;
+        if (r_block1_id <= r_block2_id) {
+          block_pairs.insert(make_pair(r_block1_id, r_block2_id));
+        }
+      }
+    }
+  }
+
+  set_lhs(new BlockRandomAccessSparseMatrix(blocks_, block_pairs));
+  set_rhs(new double[lhs()->num_rows()]);
+}
+
+LinearSolver::Summary SparseSchurComplementSolver::SolveReducedLinearSystem(
+    const LinearSolver::PerSolveOptions& per_solve_options, double* solution) {
+  if (options().type == ITERATIVE_SCHUR) {
+    return SolveReducedLinearSystemUsingConjugateGradients(per_solve_options,
+                                                           solution);
+  }
+
+  LinearSolver::Summary summary;
+  summary.num_iterations = 0;
+  summary.termination_type = LINEAR_SOLVER_SUCCESS;
+  summary.message = "Success.";
+
+  const TripletSparseMatrix* tsm =
+      down_cast<const BlockRandomAccessSparseMatrix*>(lhs())->matrix();
+  if (tsm->num_rows() == 0) {
+    return summary;
+  }
+
+  std::unique_ptr<CompressedRowSparseMatrix> lhs;
+  const CompressedRowSparseMatrix::StorageType storage_type =
+      sparse_cholesky_->StorageType();
+  if (storage_type == CompressedRowSparseMatrix::UPPER_TRIANGULAR) {
+    lhs.reset(CompressedRowSparseMatrix::FromTripletSparseMatrix(*tsm));
+    lhs->set_storage_type(CompressedRowSparseMatrix::UPPER_TRIANGULAR);
+  } else {
+    lhs.reset(
+        CompressedRowSparseMatrix::FromTripletSparseMatrixTransposed(*tsm));
+    lhs->set_storage_type(CompressedRowSparseMatrix::LOWER_TRIANGULAR);
+  }
+
+  *lhs->mutable_col_blocks() = blocks_;
+  *lhs->mutable_row_blocks() = blocks_;
+
+  summary.num_iterations = 1;
+  summary.termination_type = sparse_cholesky_->FactorAndSolve(
+      lhs.get(), rhs(), solution, &summary.message);
+  return summary;
+}
+
+LinearSolver::Summary
+SparseSchurComplementSolver::SolveReducedLinearSystemUsingConjugateGradients(
+    const LinearSolver::PerSolveOptions& per_solve_options,
+    double* solution) {
+  CHECK(options().use_explicit_schur_complement);
+  const int num_rows = lhs()->num_rows();
+  // The case where there are no f blocks, and the system is block
+  // diagonal.
+  if (num_rows == 0) {
+    LinearSolver::Summary summary;
+    summary.num_iterations = 0;
+    summary.termination_type = LINEAR_SOLVER_SUCCESS;
+    summary.message = "Success.";
+    return summary;
+  }
+
+  // Only the SCHUR_JACOBI preconditioner is supported here right now.
+  CHECK_EQ(options().preconditioner_type, SCHUR_JACOBI);
+
+  if (preconditioner_.get() == NULL) {
+    preconditioner_.reset(new BlockRandomAccessDiagonalMatrix(blocks_));
+  }
+
+  BlockRandomAccessSparseMatrix* sc =
+      down_cast<BlockRandomAccessSparseMatrix*>(
+          const_cast<BlockRandomAccessMatrix*>(lhs()));
+
+  // Extract block diagonal from the Schur complement to construct the
+  // schur_jacobi preconditioner.
+  for (int i = 0; i < blocks_.size(); ++i) {
+    const int block_size = blocks_[i];
+
+    int sc_r, sc_c, sc_row_stride, sc_col_stride;
+    CellInfo* sc_cell_info =
+        sc->GetCell(i, i, &sc_r, &sc_c, &sc_row_stride, &sc_col_stride);
+    CHECK(sc_cell_info != nullptr);
+    MatrixRef sc_m(sc_cell_info->values, sc_row_stride, sc_col_stride);
+
+    int pre_r, pre_c, pre_row_stride, pre_col_stride;
+    CellInfo* pre_cell_info = preconditioner_->GetCell(
+        i, i, &pre_r, &pre_c, &pre_row_stride, &pre_col_stride);
+    CHECK(pre_cell_info != nullptr);
+    MatrixRef pre_m(pre_cell_info->values, pre_row_stride, pre_col_stride);
+
+    pre_m.block(pre_r, pre_c, block_size, block_size) =
+        sc_m.block(sc_r, sc_c, block_size, block_size);
+  }
+  preconditioner_->Invert();
+
+  VectorRef(solution, num_rows).setZero();
+
+  std::unique_ptr<LinearOperator> lhs_adapter(
+      new BlockRandomAccessSparseMatrixAdapter(*sc));
+  std::unique_ptr<LinearOperator> preconditioner_adapter(
+      new BlockRandomAccessDiagonalMatrixAdapter(*preconditioner_));
+
+
+  LinearSolver::Options cg_options;
+  cg_options.min_num_iterations = options().min_num_iterations;
+  cg_options.max_num_iterations = options().max_num_iterations;
+  ConjugateGradientsSolver cg_solver(cg_options);
+
+  LinearSolver::PerSolveOptions cg_per_solve_options;
+  cg_per_solve_options.r_tolerance = per_solve_options.r_tolerance;
+  cg_per_solve_options.q_tolerance = per_solve_options.q_tolerance;
+  cg_per_solve_options.preconditioner = preconditioner_adapter.get();
+
+  return cg_solver.Solve(lhs_adapter.get(),
+                         rhs(),
+                         cg_per_solve_options,
+                         solution);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/schur_complement_solver.h b/internal/ceres/schur_complement_solver.h
new file mode 100644
index 0000000..16ffb8c
--- /dev/null
+++ b/internal/ceres/schur_complement_solver.h
@@ -0,0 +1,194 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_SCHUR_COMPLEMENT_SOLVER_H_
+#define CERES_INTERNAL_SCHUR_COMPLEMENT_SOLVER_H_
+
+#include <memory>
+#include <set>
+#include <utility>
+#include <vector>
+
+#include "ceres/block_random_access_diagonal_matrix.h"
+#include "ceres/block_random_access_matrix.h"
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/block_structure.h"
+#include "ceres/internal/port.h"
+#include "ceres/linear_solver.h"
+#include "ceres/schur_eliminator.h"
+#include "ceres/types.h"
+
+#ifdef CERES_USE_EIGEN_SPARSE
+#include "Eigen/SparseCholesky"
+#include "Eigen/OrderingMethods"
+#endif
+
+namespace ceres {
+namespace internal {
+
+class BlockSparseMatrix;
+class SparseCholesky;
+
+// Base class for Schur complement based linear least squares
+// solvers. It assumes that the input linear system Ax = b can be
+// partitioned into
+//
+//  E y + F z = b
+//
+// where x = [y;z] is a partition of the variables. The partitioning
+// of the variables is such that E'E is a block diagonal
+// matrix. Further, the rows of A are ordered so that for every
+// variable block in y, all the rows containing that variable block
+// occur as a vertically contiguous block, i.e. the matrix A looks like
+//
+//              E                 F
+//  A = [ y1   0   0   0 |  z1    0    0   0    z5]
+//      [ y1   0   0   0 |  z1   z2    0   0     0]
+//      [  0  y2   0   0 |   0    0   z3   0     0]
+//      [  0   0  y3   0 |  z1   z2   z3  z4    z5]
+//      [  0   0  y3   0 |  z1    0    0   0    z5]
+//      [  0   0   0  y4 |   0    0    0   0    z5]
+//      [  0   0   0  y4 |   0   z2    0   0     0]
+//      [  0   0   0  y4 |   0    0    0   0     0]
+//      [  0   0   0   0 |  z1    0    0   0     0]
+//      [  0   0   0   0 |   0    0   z3  z4    z5]
+//
+// This structure should be reflected in the corresponding
+// CompressedRowBlockStructure object associated with A. The linear
+// system Ax = b should either be well posed or the array D below
+// should be non-null and the diagonal matrix corresponding to it
+// should be non-singular.
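+//
+// Schematically, forming the normal equations of Ax = b and eliminating y,
+//
+//   [E'E  E'F ; F'E  F'F] [y ; z] = [E'b ; F'b],
+//
+// gives the reduced system in z alone,
+//
+//   (F'F - F'E (E'E)^-1 E'F) z = F'b - F'E (E'E)^-1 E'b,
+//
+// whose matrix is the Schur complement of E'E. Once z has been computed,
+// y is recovered by back substitution,
+//
+//   y = (E'E)^-1 (E'b - E'F z).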
+//
+// SchurComplementSolver has two sub-classes.
+//
+// DenseSchurComplementSolver: For problems where the Schur complement
+// matrix is small and dense, or if CHOLMOD/SuiteSparse is not
+// installed. For structure from motion problems, this solver can be
+// used for problems with up to a few hundred cameras.
+//
+// SparseSchurComplementSolver: For problems where the Schur
+// complement matrix is large and sparse. It requires that Ceres be
+// built with at least one sparse linear algebra library, as it
+// computes a sparse Cholesky factorization of the Schur complement.
+//
+// This solver can be used for solving structure from motion problems
+// with tens of thousands of cameras, though depending on the exact
+// sparsity structure, it may be better to use an iterative solver.
+//
+// The two solvers can be instantiated by calling
+// LinearSolver::Create with LinearSolver::Options::type
+// set to DENSE_SCHUR and SPARSE_SCHUR
+// respectively. LinearSolver::Options::elimination_groups[0] should
+// be at least 1.
+class SchurComplementSolver : public BlockSparseMatrixSolver {
+ public:
+  explicit SchurComplementSolver(const LinearSolver::Options& options)
+      : options_(options) {
+    CHECK_GT(options.elimination_groups.size(), 1);
+    CHECK_GT(options.elimination_groups[0], 0);
+    CHECK(options.context != NULL);
+  }
+  SchurComplementSolver(const SchurComplementSolver&) = delete;
+  void operator=(const SchurComplementSolver&) = delete;
+
+  // LinearSolver methods
+  virtual ~SchurComplementSolver() {}
+  virtual LinearSolver::Summary SolveImpl(
+      BlockSparseMatrix* A,
+      const double* b,
+      const LinearSolver::PerSolveOptions& per_solve_options,
+      double* x);
+
+ protected:
+  const LinearSolver::Options& options() const { return options_; }
+
+  const BlockRandomAccessMatrix* lhs() const { return lhs_.get(); }
+  void set_lhs(BlockRandomAccessMatrix* lhs) { lhs_.reset(lhs); }
+  const double* rhs() const { return rhs_.get(); }
+  void set_rhs(double* rhs) { rhs_.reset(rhs); }
+
+ private:
+  virtual void InitStorage(const CompressedRowBlockStructure* bs) = 0;
+  virtual LinearSolver::Summary SolveReducedLinearSystem(
+      const LinearSolver::PerSolveOptions& per_solve_options,
+      double* solution) = 0;
+
+  LinearSolver::Options options_;
+
+  std::unique_ptr<SchurEliminatorBase> eliminator_;
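+  // lhs_ holds the Schur complement and rhs_ the corresponding reduced
+  // right hand side; both are filled by eliminator_ in SolveImpl().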
+  std::unique_ptr<BlockRandomAccessMatrix> lhs_;
+  std::unique_ptr<double[]> rhs_;
+};
+
+// Dense Cholesky factorization based solver.
+class DenseSchurComplementSolver : public SchurComplementSolver {
+ public:
+  explicit DenseSchurComplementSolver(const LinearSolver::Options& options)
+      : SchurComplementSolver(options) {}
+  DenseSchurComplementSolver(const DenseSchurComplementSolver&) = delete;
+  void operator=(const DenseSchurComplementSolver&) = delete;
+
+  virtual ~DenseSchurComplementSolver() {}
+
+ private:
+  virtual void InitStorage(const CompressedRowBlockStructure* bs);
+  virtual LinearSolver::Summary SolveReducedLinearSystem(
+      const LinearSolver::PerSolveOptions& per_solve_options,
+      double* solution);
+};
+
+// Sparse Cholesky factorization based solver.
+class SparseSchurComplementSolver : public SchurComplementSolver {
+ public:
+  explicit SparseSchurComplementSolver(const LinearSolver::Options& options);
+  SparseSchurComplementSolver(const SparseSchurComplementSolver&) = delete;
+  void operator=(const SparseSchurComplementSolver&) = delete;
+
+  virtual ~SparseSchurComplementSolver();
+
+ private:
+  virtual void InitStorage(const CompressedRowBlockStructure* bs);
+  virtual LinearSolver::Summary SolveReducedLinearSystem(
+      const LinearSolver::PerSolveOptions& per_solve_options,
+      double* solution);
+  LinearSolver::Summary SolveReducedLinearSystemUsingConjugateGradients(
+      const LinearSolver::PerSolveOptions& per_solve_options,
+      double* solution);
+
+  // Size of the blocks in the Schur complement.
+  std::vector<int> blocks_;
+  std::unique_ptr<SparseCholesky> sparse_cholesky_;
+  std::unique_ptr<BlockRandomAccessDiagonalMatrix> preconditioner_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_SCHUR_COMPLEMENT_SOLVER_H_
diff --git a/internal/ceres/schur_complement_solver_test.cc b/internal/ceres/schur_complement_solver_test.cc
new file mode 100644
index 0000000..23d3674
--- /dev/null
+++ b/internal/ceres/schur_complement_solver_test.cc
@@ -0,0 +1,252 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/schur_complement_solver.h"
+
+#include <cstddef>
+#include <memory>
+
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/block_structure.h"
+#include "ceres/casts.h"
+#include "ceres/context_impl.h"
+#include "ceres/detect_structure.h"
+#include "ceres/linear_least_squares_problems.h"
+#include "ceres/linear_solver.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+class SchurComplementSolverTest : public ::testing::Test {
+ protected:
+  void SetUpFromProblemId(int problem_id) {
+    std::unique_ptr<LinearLeastSquaresProblem> problem(
+        CreateLinearLeastSquaresProblemFromId(problem_id));
+
+    CHECK(problem != nullptr);
+    A.reset(down_cast<BlockSparseMatrix*>(problem->A.release()));
+    b.reset(problem->b.release());
+    D.reset(problem->D.release());
+
+    num_cols = A->num_cols();
+    num_rows = A->num_rows();
+    num_eliminate_blocks = problem->num_eliminate_blocks;
+
+    x.resize(num_cols);
+    sol.resize(num_cols);
+    sol_d.resize(num_cols);
+
+    LinearSolver::Options options;
+    options.type = DENSE_QR;
+    ContextImpl context;
+    options.context = &context;
+
+    std::unique_ptr<LinearSolver> qr(LinearSolver::Create(options));
+
+    TripletSparseMatrix triplet_A(A->num_rows(),
+                                  A->num_cols(),
+                                  A->num_nonzeros());
+    A->ToTripletSparseMatrix(&triplet_A);
+
+    // Gold standard solutions using dense QR factorization.
+    DenseSparseMatrix dense_A(triplet_A);
+    qr->Solve(&dense_A, b.get(), LinearSolver::PerSolveOptions(), sol.data());
+
+    // Gold standard solution with appended diagonal.
+    LinearSolver::PerSolveOptions per_solve_options;
+    per_solve_options.D = D.get();
+    qr->Solve(&dense_A, b.get(), per_solve_options, sol_d.data());
+  }
+
+  void ComputeAndCompareSolutions(
+      int problem_id,
+      bool regularization,
+      ceres::LinearSolverType linear_solver_type,
+      ceres::DenseLinearAlgebraLibraryType dense_linear_algebra_library_type,
+      ceres::SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
+      bool use_postordering) {
+    SetUpFromProblemId(problem_id);
+    LinearSolver::Options options;
+    options.elimination_groups.push_back(num_eliminate_blocks);
+    options.elimination_groups.push_back(
+        A->block_structure()->cols.size() - num_eliminate_blocks);
+    options.type = linear_solver_type;
+    options.dense_linear_algebra_library_type =
+        dense_linear_algebra_library_type;
+    options.sparse_linear_algebra_library_type =
+        sparse_linear_algebra_library_type;
+    options.use_postordering = use_postordering;
+    ContextImpl context;
+    options.context = &context;
+    DetectStructure(*A->block_structure(),
+                    num_eliminate_blocks,
+                    &options.row_block_size,
+                    &options.e_block_size,
+                    &options.f_block_size);
+
+    std::unique_ptr<LinearSolver> solver(LinearSolver::Create(options));
+
+    LinearSolver::PerSolveOptions per_solve_options;
+    LinearSolver::Summary summary;
+    if (regularization) {
+      per_solve_options.D = D.get();
+    }
+
+    summary = solver->Solve(A.get(), b.get(), per_solve_options, x.data());
+    EXPECT_EQ(summary.termination_type, LINEAR_SOLVER_SUCCESS);
+
+    if (regularization) {
+      ASSERT_NEAR((sol_d - x).norm() / num_cols, 0, 1e-10)
+          << "Regularized Expected solution: " << sol_d.transpose()
+          << " Actual solution: " << x.transpose();
+    } else {
+      ASSERT_NEAR((sol - x).norm() / num_cols, 0, 1e-10)
+          << "Unregularized Expected solution: " << sol.transpose()
+          << " Actual solution: " << x.transpose();
+    }
+  }
+
+  int num_rows;
+  int num_cols;
+  int num_eliminate_blocks;
+
+  std::unique_ptr<BlockSparseMatrix> A;
+  std::unique_ptr<double[]> b;
+  std::unique_ptr<double[]> D;
+  Vector x;
+  Vector sol;
+  Vector sol_d;
+};
+
+// TODO(sameeragarwal): Refactor these using value parameterized tests.
+// TODO(sameeragarwal): More extensive tests using random matrices.
+TEST_F(SchurComplementSolverTest, DenseSchurWithEigenSmallProblem) {
+  ComputeAndCompareSolutions(2, false, DENSE_SCHUR, EIGEN, SUITE_SPARSE, true);
+  ComputeAndCompareSolutions(2, true, DENSE_SCHUR, EIGEN, SUITE_SPARSE, true);
+}
+
+TEST_F(SchurComplementSolverTest, DenseSchurWithEigenLargeProblem) {
+  ComputeAndCompareSolutions(3, false, DENSE_SCHUR, EIGEN, SUITE_SPARSE, true);
+  ComputeAndCompareSolutions(3, true, DENSE_SCHUR, EIGEN, SUITE_SPARSE, true);
+}
+
+TEST_F(SchurComplementSolverTest, DenseSchurWithEigenVaryingFBlockSize) {
+  ComputeAndCompareSolutions(4, true, DENSE_SCHUR, EIGEN, SUITE_SPARSE, true);
+}
+
+#ifndef CERES_NO_LAPACK
+TEST_F(SchurComplementSolverTest, DenseSchurWithLAPACKSmallProblem) {
+  ComputeAndCompareSolutions(2, false, DENSE_SCHUR, LAPACK, SUITE_SPARSE, true);
+  ComputeAndCompareSolutions(2, true, DENSE_SCHUR, LAPACK, SUITE_SPARSE, true);
+}
+
+TEST_F(SchurComplementSolverTest, DenseSchurWithLAPACKLargeProblem) {
+  ComputeAndCompareSolutions(3, false, DENSE_SCHUR, LAPACK, SUITE_SPARSE, true);
+  ComputeAndCompareSolutions(3, true, DENSE_SCHUR, LAPACK, SUITE_SPARSE, true);
+}
+#endif
+
+#ifndef CERES_NO_SUITESPARSE
+TEST_F(SchurComplementSolverTest,
+       SparseSchurWithSuiteSparseSmallProblemNoPostOrdering) {
+  ComputeAndCompareSolutions(
+      2, false, SPARSE_SCHUR, EIGEN, SUITE_SPARSE, false);
+  ComputeAndCompareSolutions(2, true, SPARSE_SCHUR, EIGEN, SUITE_SPARSE, false);
+}
+
+TEST_F(SchurComplementSolverTest,
+       SparseSchurWithSuiteSparseSmallProblemPostOrdering) {
+  ComputeAndCompareSolutions(2, false, SPARSE_SCHUR, EIGEN, SUITE_SPARSE, true);
+  ComputeAndCompareSolutions(2, true, SPARSE_SCHUR, EIGEN, SUITE_SPARSE, true);
+}
+
+TEST_F(SchurComplementSolverTest,
+       SparseSchurWithSuiteSparseLargeProblemNoPostOrdering) {
+  ComputeAndCompareSolutions(
+      3, false, SPARSE_SCHUR, EIGEN, SUITE_SPARSE, false);
+  ComputeAndCompareSolutions(3, true, SPARSE_SCHUR, EIGEN, SUITE_SPARSE, false);
+}
+
+TEST_F(SchurComplementSolverTest,
+       SparseSchurWithSuiteSparseLargeProblemPostOrdering) {
+  ComputeAndCompareSolutions(3, false, SPARSE_SCHUR, EIGEN, SUITE_SPARSE, true);
+  ComputeAndCompareSolutions(3, true, SPARSE_SCHUR, EIGEN, SUITE_SPARSE, true);
+}
+#endif  // CERES_NO_SUITESPARSE
+
+#ifndef CERES_NO_CXSPARSE
+TEST_F(SchurComplementSolverTest,
+       SparseSchurWithCXSparseSmallProblem) {
+  ComputeAndCompareSolutions(2, false, SPARSE_SCHUR, EIGEN, CX_SPARSE, true);
+  ComputeAndCompareSolutions(2, true, SPARSE_SCHUR, EIGEN, CX_SPARSE, true);
+}
+
+TEST_F(SchurComplementSolverTest,
+       SparseSchurWithCXSparseLargeProblem) {
+  ComputeAndCompareSolutions(3, false, SPARSE_SCHUR, EIGEN, CX_SPARSE, true);
+  ComputeAndCompareSolutions(3, true, SPARSE_SCHUR, EIGEN, CX_SPARSE, true);
+}
+#endif  // CERES_NO_CXSPARSE
+
+#ifndef CERES_NO_ACCELERATE_SPARSE
+TEST_F(SchurComplementSolverTest,
+       SparseSchurWithAccelerateSparseSmallProblem) {
+  ComputeAndCompareSolutions(2, false, SPARSE_SCHUR, EIGEN, ACCELERATE_SPARSE, true);
+  ComputeAndCompareSolutions(2, true, SPARSE_SCHUR, EIGEN, ACCELERATE_SPARSE, true);
+}
+
+TEST_F(SchurComplementSolverTest,
+       SparseSchurWithAccelerateSparseLargeProblem) {
+  ComputeAndCompareSolutions(3, false, SPARSE_SCHUR, EIGEN, ACCELERATE_SPARSE, true);
+  ComputeAndCompareSolutions(3, true, SPARSE_SCHUR, EIGEN, ACCELERATE_SPARSE, true);
+}
+#endif  // CERES_NO_ACCELERATE_SPARSE
+
+#ifdef CERES_USE_EIGEN_SPARSE
+TEST_F(SchurComplementSolverTest,
+       SparseSchurWithEigenSparseSmallProblem) {
+  ComputeAndCompareSolutions(2, false, SPARSE_SCHUR, EIGEN, EIGEN_SPARSE, true);
+  ComputeAndCompareSolutions(2, true, SPARSE_SCHUR, EIGEN, EIGEN_SPARSE, true);
+}
+
+TEST_F(SchurComplementSolverTest,
+       SparseSchurWithEigenSparseLargeProblem) {
+  ComputeAndCompareSolutions(3, false, SPARSE_SCHUR, EIGEN, EIGEN_SPARSE, true);
+  ComputeAndCompareSolutions(3, true, SPARSE_SCHUR, EIGEN, EIGEN_SPARSE, true);
+}
+#endif  // CERES_USE_EIGEN_SPARSE
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/schur_eliminator.cc b/internal/ceres/schur_eliminator.cc
new file mode 100644
index 0000000..beefa14
--- /dev/null
+++ b/internal/ceres/schur_eliminator.cc
@@ -0,0 +1,156 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+#include "ceres/linear_solver.h"
+#include "ceres/schur_eliminator.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+SchurEliminatorBase*
+SchurEliminatorBase::Create(const LinearSolver::Options& options) {
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 2) &&
+     (options.f_block_size == 2)) {
+   return new SchurEliminator<2, 2, 2>(options);
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 2) &&
+     (options.f_block_size == 3)) {
+   return new SchurEliminator<2, 2, 3>(options);
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 2) &&
+     (options.f_block_size == 4)) {
+   return new SchurEliminator<2, 2, 4>(options);
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 2)) {
+   return new SchurEliminator<2, 2, Eigen::Dynamic>(options);
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 3) &&
+     (options.f_block_size == 3)) {
+   return new SchurEliminator<2, 3, 3>(options);
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 3) &&
+     (options.f_block_size == 4)) {
+   return new SchurEliminator<2, 3, 4>(options);
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 3) &&
+     (options.f_block_size == 6)) {
+   return new SchurEliminator<2, 3, 6>(options);
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 3) &&
+     (options.f_block_size == 9)) {
+   return new SchurEliminator<2, 3, 9>(options);
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 3)) {
+   return new SchurEliminator<2, 3, Eigen::Dynamic>(options);
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 4) &&
+     (options.f_block_size == 3)) {
+   return new SchurEliminator<2, 4, 3>(options);
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 4) &&
+     (options.f_block_size == 4)) {
+   return new SchurEliminator<2, 4, 4>(options);
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 4) &&
+     (options.f_block_size == 6)) {
+   return new SchurEliminator<2, 4, 6>(options);
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 4) &&
+     (options.f_block_size == 8)) {
+   return new SchurEliminator<2, 4, 8>(options);
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 4) &&
+     (options.f_block_size == 9)) {
+   return new SchurEliminator<2, 4, 9>(options);
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 4)) {
+   return new SchurEliminator<2, 4, Eigen::Dynamic>(options);
+ }
+ if (options.row_block_size == 2){
+   return new SchurEliminator<2, Eigen::Dynamic, Eigen::Dynamic>(options);
+ }
+ if ((options.row_block_size == 4) &&
+     (options.e_block_size == 4) &&
+     (options.f_block_size == 2)) {
+   return new SchurEliminator<4, 4, 2>(options);
+ }
+ if ((options.row_block_size == 4) &&
+     (options.e_block_size == 4) &&
+     (options.f_block_size == 3)) {
+   return new SchurEliminator<4, 4, 3>(options);
+ }
+ if ((options.row_block_size == 4) &&
+     (options.e_block_size == 4) &&
+     (options.f_block_size == 4)) {
+   return new SchurEliminator<4, 4, 4>(options);
+ }
+ if ((options.row_block_size == 4) &&
+     (options.e_block_size == 4)) {
+   return new SchurEliminator<4, 4, Eigen::Dynamic>(options);
+ }
+
+#endif
+  VLOG(1) << "Template specializations not found for <"
+          << options.row_block_size << ","
+          << options.e_block_size << ","
+          << options.f_block_size << ">";
+  return new SchurEliminator<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic>(options);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/schur_eliminator.h b/internal/ceres/schur_eliminator.h
new file mode 100644
index 0000000..11e6eba
--- /dev/null
+++ b/internal/ceres/schur_eliminator.h
@@ -0,0 +1,366 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_SCHUR_ELIMINATOR_H_
+#define CERES_INTERNAL_SCHUR_ELIMINATOR_H_
+
+#include <map>
+#include <memory>
+#include <mutex>
+#include <vector>
+
+#include "ceres/block_random_access_matrix.h"
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/block_structure.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/linear_solver.h"
+
+namespace ceres {
+namespace internal {
+
+// Classes implementing the SchurEliminatorBase interface implement
+// variable elimination for linear least squares problems. Assuming
+// that the input linear system Ax = b can be partitioned into
+//
+//  E y + F z = b
+//
+// where x = [y;z] is a partition of the variables. The partitioning
+// of the variables is such that E'E is a block diagonal matrix, or in
+// other words, the parameter blocks in E form an independent set of
+// the graph implied by the block matrix A'A. Then, this
+// class provides the functionality to compute the Schur complement
+// system
+//
+//   S z = r
+//
+// where
+//
+//   S = F'F - F'E (E'E)^{-1} E'F and r = F'b - F'E(E'E)^(-1) E'b
+//
+// This is the Eliminate operation, i.e., construct the linear system
+// obtained by eliminating the variables in E.
+//
+// The eliminator also provides the reverse functionality, i.e. given
+// values for z it can back substitute for the values of y, by solving the
+// linear system
+//
+//  Ey = b - F z
+//
+// which is done by observing that
+//
+//  y = (E'E)^(-1) [E'b - E'F z]
+//
+// The eliminator has a number of requirements.
+//
+// The rows of A are ordered so that for every variable block in y,
+// all the rows containing that variable block occur as a vertically
+// contiguous block, i.e., the matrix A looks like
+//
+//              E                 F                   chunk
+//  A = [ y1   0   0   0 |  z1    0    0   0    z5]     1
+//      [ y1   0   0   0 |  z1   z2    0   0     0]     1
+//      [  0  y2   0   0 |   0    0   z3   0     0]     2
+//      [  0   0  y3   0 |  z1   z2   z3  z4    z5]     3
+//      [  0   0  y3   0 |  z1    0    0   0    z5]     3
+//      [  0   0   0  y4 |   0    0    0   0    z5]     4
+//      [  0   0   0  y4 |   0   z2    0   0     0]     4
+//      [  0   0   0  y4 |   0    0    0   0     0]     4
+//      [  0   0   0   0 |  z1    0    0   0     0] non chunk blocks
+//      [  0   0   0   0 |   0    0   z3  z4    z5] non chunk blocks
+//
+// This structure should be reflected in the corresponding
+// CompressedRowBlockStructure object associated with A. The linear
+// system Ax = b should either be well posed or the array D below
+// should be non-null and the diagonal matrix corresponding to it
+// should be non-singular. For simplicity of exposition only the case
+// with a null D is described.
+//
+// The usual way to do the elimination is as follows. Starting with
+//
+//  E y + F z = b
+//
+// we can form the normal equations,
+//
+//  E'E y + E'F z = E'b
+//  F'E y + F'F z = F'b
+//
+// multiplying both sides of the first equation by (E'E)^(-1) and then
+// by F'E we get
+//
+//  F'E y + F'E (E'E)^(-1) E'F z =  F'E (E'E)^(-1) E'b
+//  F'E y +                F'F z =  F'b
+//
+// now subtracting the two equations we get
+//
+// [F'F - F'E (E'E)^(-1) E'F] z = F'b - F'E(E'E)^(-1) E'b
+//
+// Instead of forming the normal equations and operating on them as
+// general sparse matrices, the algorithm here deals with one
+// parameter block in y at a time. The rows corresponding to a single
+// parameter block yi are known as a chunk, and the algorithm operates
+// on one chunk at a time. The mathematics remains the same since the
+// reduced linear system can be shown to be the sum of the reduced
+// linear systems for each chunk. This can be seen by observing two
+// things.
+//
+//  1. E'E is a block diagonal matrix.
+//
+//  2. When E'F is computed, only the terms within a single chunk
+//  interact, i.e for y1 column blocks when transposed and multiplied
+//  with F, the only non-zero contribution comes from the blocks in
+//  chunk1.
+//
+// Thus, the reduced linear system
+//
+//  F'F - F'E (E'E)^{-1} E'F
+//
+// can be rewritten as
+//
+//  sum_k F_k'F_k - F_k'E_k (E_k'E_k)^(-1) E_k' F_k
+//
+// where the sum is over chunks and E_k'E_k is a dense matrix of size
+// y1 x y1.
+//
+// Advanced usage. Until now it has been assumed that the user would
+// be interested in all of the Schur complement S. However, it is also
+// possible to use this eliminator to obtain an arbitrary submatrix of
+// the full Schur complement. When the eliminator is generating the
+// blocks of S, it asks the BlockRandomAccessMatrix instance passed to
+// it if it has storage for that block. If it does, the eliminator
+// computes/updates it; if not, the block is skipped. This is useful
+// when one is interested in constructing a preconditioner based on
+// the Schur complement, e.g., computing the block diagonal of S so
+// that it can be used as a preconditioner for an Iterative
+// Substructuring based solver [see Agarwal et al., Bundle Adjustment
+// in the Large, ECCV 2010, for an example of such use].
+//
+// Example usage: Please see schur_complement_solver.cc
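+//
+// A minimal sketch of the expected call sequence is given below. The
+// variable names used here (A, b, D, bs, lhs, rhs, y, z, num_e_blocks,
+// context) are illustrative and are not taken from
+// schur_complement_solver.cc:
+//
+//   LinearSolver::Options options;
+//   options.context = &context;  // ContextImpl; the eliminator CHECKs
+//                                // that it is non-null.
+//   std::unique_ptr<SchurEliminatorBase> eliminator(
+//       SchurEliminatorBase::Create(options));
+//   eliminator->Init(num_e_blocks, /* assume_full_rank_ete = */ true, bs);
+//   eliminator->Eliminate(A, b, D, &lhs, rhs.data());
+//   // ... solve lhs * z = rhs for z with a dense or sparse solver ...
+//   eliminator->BackSubstitute(A, b, D, z.data(), y);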
+class SchurEliminatorBase {
+ public:
+  virtual ~SchurEliminatorBase() {}
+
+  // Initialize the eliminator. It is the user's responsibility to call
+  // this function before calling Eliminate or BackSubstitute. It is
+  // also the caller's responsibility to ensure that the
+  // CompressedRowBlockStructure object passed to this method is the
+  // same as (or equivalent to) the one associated with the
+  // BlockSparseMatrix objects below.
+  //
+  // assume_full_rank_ete controls how the eliminator inverts the
+  // diagonal blocks corresponding to the e blocks in A'A. If
+  // assume_full_rank_ete is true, then a Cholesky factorization is
+  // used to compute the inverse; otherwise a singular value
+  // decomposition is used to compute the pseudo inverse.
+  virtual void Init(int num_eliminate_blocks,
+                    bool assume_full_rank_ete,
+                    const CompressedRowBlockStructure* bs) = 0;
+
+  // Compute the Schur complement system from the augmented linear
+  // least squares problem [A;D] x = [b;0]. The left hand side and the
+  // right hand side of the reduced linear system are returned in lhs
+  // and rhs respectively.
+  //
+  // It is the caller's responsibility to construct and initialize
+  // lhs. Depending upon the structure of the lhs object passed here,
+  // the full or a submatrix of the Schur complement will be computed.
+  //
+  // Since the Schur complement is a symmetric matrix, only the upper
+  // triangular part of the Schur complement is computed.
+  virtual void Eliminate(const BlockSparseMatrix* A,
+                         const double* b,
+                         const double* D,
+                         BlockRandomAccessMatrix* lhs,
+                         double* rhs) = 0;
+
+  // Given values for the variables z in the F block of A, solve for
+  // the optimal values of the variables y corresponding to the E
+  // block in A.
+  virtual void BackSubstitute(const BlockSparseMatrix* A,
+                              const double* b,
+                              const double* D,
+                              const double* z,
+                              double* y) = 0;
+  // Factory
+  static SchurEliminatorBase* Create(const LinearSolver::Options& options);
+};
+
+// Templated implementation of the SchurEliminatorBase interface. The
+// templating is on the sizes of the row, e and f blocks in the input
+// matrix. In many problems, the sizes of one or more of these blocks
+// are constant; in that case, it is worth passing these parameters as
+// template arguments so that they are visible to the compiler and can
+// be used for compile-time optimization of the low level linear
+// algebra routines.
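+//
+// As an illustrative example (these particular sizes are an assumption
+// about the problem, not a requirement of this class), a problem with
+// 2-dimensional residuals, 3-dimensional e blocks and 9-dimensional f
+// blocks could request the corresponding specialization via
+//
+//   options.row_block_size = 2;
+//   options.e_block_size = 3;
+//   options.f_block_size = 9;
+//   std::unique_ptr<SchurEliminatorBase> eliminator(
+//       SchurEliminatorBase::Create(options));  // picks SchurEliminator<2, 3, 9>
+//
+// In practice these sizes are usually filled in by DetectStructure.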
+template <int kRowBlockSize = Eigen::Dynamic,
+          int kEBlockSize = Eigen::Dynamic,
+          int kFBlockSize = Eigen::Dynamic >
+class SchurEliminator : public SchurEliminatorBase {
+ public:
+  explicit SchurEliminator(const LinearSolver::Options& options)
+      : num_threads_(options.num_threads),
+        context_(options.context) {
+    CHECK(context_ != nullptr);
+  }
+
+  // SchurEliminatorBase Interface
+  virtual ~SchurEliminator();
+  virtual void Init(int num_eliminate_blocks,
+                    bool assume_full_rank_ete,
+                    const CompressedRowBlockStructure* bs);
+  virtual void Eliminate(const BlockSparseMatrix* A,
+                         const double* b,
+                         const double* D,
+                         BlockRandomAccessMatrix* lhs,
+                         double* rhs);
+  virtual void BackSubstitute(const BlockSparseMatrix* A,
+                              const double* b,
+                              const double* D,
+                              const double* z,
+                              double* y);
+
+ private:
+  // Chunk objects store combinatorial information needed to
+  // efficiently eliminate a whole chunk out of the least squares
+  // problem. Consider the first chunk in the example matrix above.
+  //
+  //      [ y1   0   0   0 |  z1    0    0   0    z5]
+  //      [ y1   0   0   0 |  z1   z2    0   0     0]
+  //
+  // One of the intermediate quantities that needs to be calculated is,
+  // for each row, the product of the transposed y block with the
+  // non-zero z blocks, and the sum of these products across rows. A
+  // temporary array "buffer_" is used for computing and storing them,
+  // and buffer_layout maps the indices of the z-blocks to their
+  // positions in the buffer_ array. The size of the chunk is the
+  // number of row blocks/residual blocks for the particular y block
+  // being considered.
+  //
+  // For the example chunk shown above,
+  //
+  // size = 2
+  //
+  // The entries of buffer_layout will be filled in the following order.
+  //
+  // buffer_layout[z1] = 0
+  // buffer_layout[z5] = y1 * z1
+  // buffer_layout[z2] = y1 * z1 + y1 * z5
+  typedef std::map<int, int> BufferLayoutType;
+  struct Chunk {
+    Chunk() : size(0) {}
+    int size;
+    int start;
+    BufferLayoutType buffer_layout;
+  };
+
+  void ChunkDiagonalBlockAndGradient(
+      const Chunk& chunk,
+      const BlockSparseMatrix* A,
+      const double* b,
+      int row_block_counter,
+      typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix* eet,
+      double* g,
+      double* buffer,
+      BlockRandomAccessMatrix* lhs);
+
+  void UpdateRhs(const Chunk& chunk,
+                 const BlockSparseMatrix* A,
+                 const double* b,
+                 int row_block_counter,
+                 const double* inverse_ete_g,
+                 double* rhs);
+
+  void ChunkOuterProduct(int thread_id,
+                         const CompressedRowBlockStructure* bs,
+                         const Matrix& inverse_eet,
+                         const double* buffer,
+                         const BufferLayoutType& buffer_layout,
+                         BlockRandomAccessMatrix* lhs);
+  void EBlockRowOuterProduct(const BlockSparseMatrix* A,
+                             int row_block_index,
+                             BlockRandomAccessMatrix* lhs);
+
+  void NoEBlockRowsUpdate(const BlockSparseMatrix* A,
+                          const double* b,
+                          int row_block_counter,
+                          BlockRandomAccessMatrix* lhs,
+                          double* rhs);
+
+  void NoEBlockRowOuterProduct(const BlockSparseMatrix* A,
+                               int row_block_index,
+                               BlockRandomAccessMatrix* lhs);
+
+  int num_threads_;
+  ContextImpl* context_;
+  int num_eliminate_blocks_;
+  bool assume_full_rank_ete_;
+
+  // Block layout of the columns of the reduced linear system. Since
+  // the f blocks can be of varying size, this vector stores the
+  // position of each f block in the row/col of the reduced linear
+  // system. Thus lhs_row_layout_[i] is the row/col position of the
+  // i^th f block.
+  std::vector<int> lhs_row_layout_;
+
+  // Combinatorial structure of the chunks in A. For more information
+  // see the documentation of the Chunk object above.
+  std::vector<Chunk> chunks_;
+
+  // TODO(sameeragarwal): The following two arrays contain per-thread
+  // storage. They should be refactored into a per thread struct.
+
+  // Buffer to store the products of the y and z blocks generated
+  // during the elimination phase. buffer_ is of size num_threads *
+  // buffer_size_. Each thread accesses the chunk
+  //
+  //   [thread_id * buffer_size_ , (thread_id + 1) * buffer_size_]
+  //
+  std::unique_ptr<double[]> buffer_;
+
+  // Buffer to store per thread matrix matrix products used by
+  // ChunkOuterProduct. Like buffer_ it is of size num_threads *
+  // buffer_size_. Each thread accesses the chunk
+  //
+  //   [thread_id * buffer_size_ , (thread_id + 1) * buffer_size_ -1]
+  //
+  std::unique_ptr<double[]> chunk_outer_product_buffer_;
+
+  int buffer_size_;
+  int uneliminated_row_begins_;
+
+  // Locks for the blocks in the right hand side of the reduced linear
+  // system.
+  std::vector<std::mutex*> rhs_locks_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_SCHUR_ELIMINATOR_H_
diff --git a/internal/ceres/schur_eliminator_impl.h b/internal/ceres/schur_eliminator_impl.h
new file mode 100644
index 0000000..d754d9d
--- /dev/null
+++ b/internal/ceres/schur_eliminator_impl.h
@@ -0,0 +1,706 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// TODO(sameeragarwal): row_block_counter can perhaps be replaced by
+// Chunk::start ?
+
+#ifndef CERES_INTERNAL_SCHUR_ELIMINATOR_IMPL_H_
+#define CERES_INTERNAL_SCHUR_ELIMINATOR_IMPL_H_
+
+// Eigen has an internal threshold switching between different matrix
+// multiplication algorithms. In particular for matrices larger than
+// EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD it uses a cache friendly
+// matrix matrix product algorithm that has a higher setup cost. For
+// matrix sizes close to this threshold, especially when the matrices
+// are thin and long, the default choice may not be optimal. This is
+// the case for us, as the default choice caused a 30% performance
+// regression when we moved from Eigen2 to Eigen3.
+
+#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 10
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#include <algorithm>
+#include <map>
+
+#include "Eigen/Dense"
+#include "ceres/block_random_access_matrix.h"
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/block_structure.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/fixed_array.h"
+#include "ceres/invert_psd_matrix.h"
+#include "ceres/map_util.h"
+#include "ceres/parallel_for.h"
+#include "ceres/schur_eliminator.h"
+#include "ceres/scoped_thread_token.h"
+#include "ceres/small_blas.h"
+#include "ceres/stl_util.h"
+#include "ceres/thread_token_provider.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::~SchurEliminator() {
+  STLDeleteElements(&rhs_locks_);
+}
+
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::Init(
+    int num_eliminate_blocks,
+    bool assume_full_rank_ete,
+    const CompressedRowBlockStructure* bs) {
+  CHECK_GT(num_eliminate_blocks, 0)
+      << "SchurComplementSolver cannot be initialized with "
+      << "num_eliminate_blocks = 0.";
+
+  num_eliminate_blocks_ = num_eliminate_blocks;
+  assume_full_rank_ete_ = assume_full_rank_ete;
+
+  const int num_col_blocks = bs->cols.size();
+  const int num_row_blocks = bs->rows.size();
+
+  buffer_size_ = 1;
+  chunks_.clear();
+  lhs_row_layout_.clear();
+
+  int lhs_num_rows = 0;
+  // Add a map object for each block in the reduced linear system
+  // and build the row/column block structure of the reduced linear
+  // system.
+  lhs_row_layout_.resize(num_col_blocks - num_eliminate_blocks_);
+  for (int i = num_eliminate_blocks_; i < num_col_blocks; ++i) {
+    lhs_row_layout_[i - num_eliminate_blocks_] = lhs_num_rows;
+    lhs_num_rows += bs->cols[i].size;
+  }
+
+  int r = 0;
+  // Iterate over the row blocks of A, and detect the chunks. The
+  // matrix should already have been ordered so that all rows
+  // containing the same y block are vertically contiguous. Along
+  // the way also compute the amount of space each chunk will need
+  // to perform the elimination.
+  while (r < num_row_blocks) {
+    const int chunk_block_id = bs->rows[r].cells.front().block_id;
+    if (chunk_block_id >= num_eliminate_blocks_) {
+      break;
+    }
+
+    chunks_.push_back(Chunk());
+    Chunk& chunk = chunks_.back();
+    chunk.size = 0;
+    chunk.start = r;
+    int buffer_size = 0;
+    const int e_block_size = bs->cols[chunk_block_id].size;
+
+    // Add to the chunk until the first block in the row is
+    // different than the one in the first row for the chunk.
+    while (r + chunk.size < num_row_blocks) {
+      const CompressedRow& row = bs->rows[r + chunk.size];
+      if (row.cells.front().block_id != chunk_block_id) {
+        break;
+      }
+
+      // Iterate over the blocks in the row, ignoring the first
+      // block since it is the one to be eliminated.
+      for (int c = 1; c < row.cells.size(); ++c) {
+        const Cell& cell = row.cells[c];
+        if (InsertIfNotPresent(
+                &(chunk.buffer_layout), cell.block_id, buffer_size)) {
+          buffer_size += e_block_size * bs->cols[cell.block_id].size;
+        }
+      }
+
+      buffer_size_ = std::max(buffer_size, buffer_size_);
+      ++chunk.size;
+    }
+
+    CHECK_GT(chunk.size, 0);
+    r += chunk.size;
+  }
+  const Chunk& chunk = chunks_.back();
+
+  uneliminated_row_begins_ = chunk.start + chunk.size;
+
+  buffer_.reset(new double[buffer_size_ * num_threads_]);
+
+  // chunk_outer_product_buffer_ only needs to store e_block_size *
+  // f_block_size, which is always less than buffer_size_, so we just
+  // allocate buffer_size_ per thread.
+  chunk_outer_product_buffer_.reset(new double[buffer_size_ * num_threads_]);
+
+  STLDeleteElements(&rhs_locks_);
+  rhs_locks_.resize(num_col_blocks - num_eliminate_blocks_);
+  for (int i = 0; i < num_col_blocks - num_eliminate_blocks_; ++i) {
+    rhs_locks_[i] = new std::mutex;
+  }
+}
+
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+void
+SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
+Eliminate(const BlockSparseMatrix* A,
+          const double* b,
+          const double* D,
+          BlockRandomAccessMatrix* lhs,
+          double* rhs) {
+  if (lhs->num_rows() > 0) {
+    lhs->SetZero();
+    if (rhs) {
+      VectorRef(rhs, lhs->num_rows()).setZero();
+    }
+  }
+
+  const CompressedRowBlockStructure* bs = A->block_structure();
+  const int num_col_blocks = bs->cols.size();
+
+  // Add the diagonal to the schur complement.
+  if (D != NULL) {
+    ParallelFor(
+        context_,
+        num_eliminate_blocks_,
+        num_col_blocks,
+        num_threads_,
+        [&](int i) {
+          const int block_id = i - num_eliminate_blocks_;
+          int r, c, row_stride, col_stride;
+          CellInfo* cell_info = lhs->GetCell(block_id, block_id, &r, &c,
+                                             &row_stride, &col_stride);
+          if (cell_info != NULL) {
+            const int block_size = bs->cols[i].size;
+            typename EigenTypes<Eigen::Dynamic>::ConstVectorRef diag(
+                D + bs->cols[i].position, block_size);
+
+            std::lock_guard<std::mutex> l(cell_info->m);
+            MatrixRef m(cell_info->values, row_stride, col_stride);
+            m.block(r, c, block_size, block_size).diagonal() +=
+                diag.array().square().matrix();
+          }
+        });
+  }
+
+  // Eliminate y blocks one chunk at a time.  For each chunk, compute
+  // the entries of the normal equations and the gradient vector block
+  // corresponding to the y block and then apply Gaussian elimination
+  // to them. The matrix ete stores the normal matrix corresponding to
+  // the block being eliminated and array buffer_ contains the
+  // non-zero blocks in the row corresponding to this y block in the
+  // normal equations. This computation is done in
+  // ChunkDiagonalBlockAndGradient. UpdateRhs then applies Gaussian
+  // elimination to the rhs of the normal equations, updating the rhs
+  // of the reduced linear system by modifying rhs blocks for all the
+  // z blocks that share a row block/residual term with the y
+  // block. ChunkOuterProduct does the corresponding operation for the
+  // lhs of the reduced linear system.
+  ParallelFor(
+      context_,
+      0,
+      int(chunks_.size()),
+      num_threads_,
+      [&](int thread_id, int i) {
+        double* buffer = buffer_.get() + thread_id * buffer_size_;
+        const Chunk& chunk = chunks_[i];
+        const int e_block_id = bs->rows[chunk.start].cells.front().block_id;
+        const int e_block_size = bs->cols[e_block_id].size;
+
+        VectorRef(buffer, buffer_size_).setZero();
+
+        typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix
+            ete(e_block_size, e_block_size);
+
+        if (D != NULL) {
+          const typename EigenTypes<kEBlockSize>::ConstVectorRef
+              diag(D + bs->cols[e_block_id].position, e_block_size);
+          ete = diag.array().square().matrix().asDiagonal();
+        } else {
+          ete.setZero();
+        }
+
+        FixedArray<double, 8> g(e_block_size);
+        typename EigenTypes<kEBlockSize>::VectorRef gref(g.get(), e_block_size);
+        gref.setZero();
+
+        // We are going to be computing
+        //
+        //   S += F'F - F'E(E'E)^{-1}E'F
+        //
+        // for each Chunk. The computation is broken down into a number of
+        // function calls as below.
+
+        // Compute the outer product of the e_blocks with themselves (ete
+        // = E'E). Compute the product of the e_blocks with the
+        // corresponding f_blocks (buffer = E'F), the gradient of the terms
+        // in this chunk (g) and add the outer product of the f_blocks to
+        // Schur complement (S += F'F).
+        ChunkDiagonalBlockAndGradient(
+            chunk, A, b, chunk.start, &ete, g.get(), buffer, lhs);
+
+        // Normally one wouldn't compute the inverse explicitly, but
+        // e_block_size will typically be a small number like 3, in
+        // which case it is much faster to compute the inverse once and
+        // use it to multiply other matrices/vectors instead of doing a
+        // Solve call over and over again.
+        typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix inverse_ete =
+            InvertPSDMatrix<kEBlockSize>(assume_full_rank_ete_, ete);
+
+        // For the current chunk compute and update the rhs of the reduced
+        // linear system.
+        //
+        //   rhs = F'b - F'E(E'E)^(-1) E'b
+
+        if (rhs) {
+          FixedArray<double, 8> inverse_ete_g(e_block_size);
+          MatrixVectorMultiply<kEBlockSize, kEBlockSize, 0>(
+              inverse_ete.data(),
+              e_block_size,
+              e_block_size,
+              g.get(),
+              inverse_ete_g.get());
+          UpdateRhs(chunk, A, b, chunk.start, inverse_ete_g.get(), rhs);
+        }
+
+        // S -= F'E(E'E)^{-1}E'F
+        ChunkOuterProduct(
+            thread_id, bs, inverse_ete, buffer, chunk.buffer_layout, lhs);
+      });
+
+  // For rows with no e_blocks, the Schur complement update reduces to
+  // S += F'F.
+  NoEBlockRowsUpdate(A, b, uneliminated_row_begins_, lhs, rhs);
+}
+
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+void
+SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
+BackSubstitute(const BlockSparseMatrix* A,
+               const double* b,
+               const double* D,
+               const double* z,
+               double* y) {
+  const CompressedRowBlockStructure* bs = A->block_structure();
+
+  ParallelFor(
+      context_,
+      0,
+      int(chunks_.size()),
+      num_threads_,
+      [&](int i) {
+    const Chunk& chunk = chunks_[i];
+    const int e_block_id = bs->rows[chunk.start].cells.front().block_id;
+    const int e_block_size = bs->cols[e_block_id].size;
+
+    double* y_ptr = y + bs->cols[e_block_id].position;
+    typename EigenTypes<kEBlockSize>::VectorRef y_block(y_ptr, e_block_size);
+
+    typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix
+        ete(e_block_size, e_block_size);
+    if (D != NULL) {
+      const typename EigenTypes<kEBlockSize>::ConstVectorRef
+          diag(D + bs->cols[e_block_id].position, e_block_size);
+      ete = diag.array().square().matrix().asDiagonal();
+    } else {
+      ete.setZero();
+    }
+
+    const double* values = A->values();
+    for (int j = 0; j < chunk.size; ++j) {
+      const CompressedRow& row = bs->rows[chunk.start + j];
+      const Cell& e_cell = row.cells.front();
+      DCHECK_EQ(e_block_id, e_cell.block_id);
+
+      FixedArray<double, 8> sj(row.block.size);
+
+      typename EigenTypes<kRowBlockSize>::VectorRef(sj.get(), row.block.size) =
+          typename EigenTypes<kRowBlockSize>::ConstVectorRef
+          (b + bs->rows[chunk.start + j].block.position, row.block.size);
+
+      for (int c = 1; c < row.cells.size(); ++c) {
+        const int f_block_id = row.cells[c].block_id;
+        const int f_block_size = bs->cols[f_block_id].size;
+        const int r_block = f_block_id - num_eliminate_blocks_;
+
+        MatrixVectorMultiply<kRowBlockSize, kFBlockSize, -1>(
+            values + row.cells[c].position, row.block.size, f_block_size,
+            z + lhs_row_layout_[r_block],
+            sj.get());
+      }
+
+      MatrixTransposeVectorMultiply<kRowBlockSize, kEBlockSize, 1>(
+          values + e_cell.position, row.block.size, e_block_size,
+          sj.get(),
+          y_ptr);
+
+      MatrixTransposeMatrixMultiply
+          <kRowBlockSize, kEBlockSize, kRowBlockSize, kEBlockSize, 1>(
+          values + e_cell.position, row.block.size, e_block_size,
+          values + e_cell.position, row.block.size, e_block_size,
+          ete.data(), 0, 0, e_block_size, e_block_size);
+    }
+
+    y_block =
+        InvertPSDMatrix<kEBlockSize>(assume_full_rank_ete_, ete) * y_block;
+  });
+}
+
+// Update the rhs of the reduced linear system. Compute
+//
+//   F'b - F'E(E'E)^(-1) E'b
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+void
+SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
+UpdateRhs(const Chunk& chunk,
+          const BlockSparseMatrix* A,
+          const double* b,
+          int row_block_counter,
+          const double* inverse_ete_g,
+          double* rhs) {
+  const CompressedRowBlockStructure* bs = A->block_structure();
+  const int e_block_id = bs->rows[chunk.start].cells.front().block_id;
+  const int e_block_size = bs->cols[e_block_id].size;
+
+  int b_pos = bs->rows[row_block_counter].block.position;
+  const double* values = A->values();
+  for (int j = 0; j < chunk.size; ++j) {
+    const CompressedRow& row = bs->rows[row_block_counter + j];
+    const Cell& e_cell = row.cells.front();
+
+    typename EigenTypes<kRowBlockSize>::Vector sj =
+        typename EigenTypes<kRowBlockSize>::ConstVectorRef
+        (b + b_pos, row.block.size);
+
+    MatrixVectorMultiply<kRowBlockSize, kEBlockSize, -1>(
+        values + e_cell.position, row.block.size, e_block_size,
+        inverse_ete_g, sj.data());
+
+    for (int c = 1; c < row.cells.size(); ++c) {
+      const int block_id = row.cells[c].block_id;
+      const int block_size = bs->cols[block_id].size;
+      const int block = block_id - num_eliminate_blocks_;
+      std::lock_guard<std::mutex> l(*rhs_locks_[block]);
+      MatrixTransposeVectorMultiply<kRowBlockSize, kFBlockSize, 1>(
+          values + row.cells[c].position,
+          row.block.size, block_size,
+          sj.data(), rhs + lhs_row_layout_[block]);
+    }
+    b_pos += row.block.size;
+  }
+}
+
+// Given a Chunk, i.e., a set of rows with the same e_block, e.g. the
+// following Chunk with two rows,
+//
+//                E                   F
+//      [ y11   0   0   0 |  z11     0    0   0    z51]
+//      [ y12   0   0   0 |  z12   z22    0   0      0]
+//
+// this function computes two matrices. The diagonal block matrix
+//
+//   ete = y11' * y11 + y12' * y12
+//
+// and the off diagonal blocks in the Gauss-Newton Hessian,
+//
+//   buffer = [y11' * z11 + y12' * z12, y12' * z22, y11' * z51]
+//
+// which are zero compressed versions of the block sparse matrices E'E
+// and E'F. It also computes the gradient of the e_block, E'b.
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+void
+SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
+ChunkDiagonalBlockAndGradient(
+    const Chunk& chunk,
+    const BlockSparseMatrix* A,
+    const double* b,
+    int row_block_counter,
+    typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix* ete,
+    double* g,
+    double* buffer,
+    BlockRandomAccessMatrix* lhs) {
+  const CompressedRowBlockStructure* bs = A->block_structure();
+
+  int b_pos = bs->rows[row_block_counter].block.position;
+  const int e_block_size = ete->rows();
+
+  // Iterate over the rows in this chunk; for each row, compute the
+  // contribution of its F blocks to the Schur complement, the
+  // contribution of its E block to the matrix E'E (ete), and the
+  // corresponding block in the gradient vector.
+  const double* values = A->values();
+  for (int j = 0; j < chunk.size; ++j) {
+    const CompressedRow& row = bs->rows[row_block_counter + j];
+
+    if (row.cells.size() > 1) {
+      EBlockRowOuterProduct(A, row_block_counter + j, lhs);
+    }
+
+    // Extract the e_block, ETE += E_i' E_i
+    const Cell& e_cell = row.cells.front();
+    MatrixTransposeMatrixMultiply
+        <kRowBlockSize, kEBlockSize, kRowBlockSize, kEBlockSize, 1>(
+            values + e_cell.position, row.block.size, e_block_size,
+            values + e_cell.position, row.block.size, e_block_size,
+            ete->data(), 0, 0, e_block_size, e_block_size);
+
+    if (b) {
+      // g += E_i' b_i
+      MatrixTransposeVectorMultiply<kRowBlockSize, kEBlockSize, 1>(
+          values + e_cell.position, row.block.size, e_block_size,
+          b + b_pos,
+          g);
+    }
+
+    // buffer = E'F. This computation is done by iterating over the
+    // f_blocks for each row in the chunk.
+    for (int c = 1; c < row.cells.size(); ++c) {
+      const int f_block_id = row.cells[c].block_id;
+      const int f_block_size = bs->cols[f_block_id].size;
+      double* buffer_ptr =
+          buffer +  FindOrDie(chunk.buffer_layout, f_block_id);
+      MatrixTransposeMatrixMultiply
+          <kRowBlockSize, kEBlockSize, kRowBlockSize, kFBlockSize, 1>(
+          values + e_cell.position, row.block.size, e_block_size,
+          values + row.cells[c].position, row.block.size, f_block_size,
+          buffer_ptr, 0, 0, e_block_size, f_block_size);
+    }
+    b_pos += row.block.size;
+  }
+}
+
+// Compute the outer product F'E(E'E)^{-1}E'F and subtract it from the
+// Schur complement matrix, i.e
+//
+//  S -= F'E(E'E)^{-1}E'F.
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+void
+SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
+ChunkOuterProduct(int thread_id,
+                  const CompressedRowBlockStructure* bs,
+                  const Matrix& inverse_ete,
+                  const double* buffer,
+                  const BufferLayoutType& buffer_layout,
+                  BlockRandomAccessMatrix* lhs) {
+  // This is the most computationally expensive part of this
+  // code. Profiling experiments reveal that the bottleneck is not the
+  // computation of the right-hand matrix product, but memory
+  // references to the left hand side.
+  const int e_block_size = inverse_ete.rows();
+  BufferLayoutType::const_iterator it1 = buffer_layout.begin();
+
+  double* b1_transpose_inverse_ete =
+      chunk_outer_product_buffer_.get() + thread_id * buffer_size_;
+
+  // S(i,j) -= bi' * ete^{-1} b_j
+  for (; it1 != buffer_layout.end(); ++it1) {
+    const int block1 = it1->first - num_eliminate_blocks_;
+    const int block1_size = bs->cols[it1->first].size;
+    MatrixTransposeMatrixMultiply
+        <kEBlockSize, kFBlockSize, kEBlockSize, kEBlockSize, 0>(
+        buffer + it1->second, e_block_size, block1_size,
+        inverse_ete.data(), e_block_size, e_block_size,
+        b1_transpose_inverse_ete, 0, 0, block1_size, e_block_size);
+
+    BufferLayoutType::const_iterator it2 = it1;
+    for (; it2 != buffer_layout.end(); ++it2) {
+      const int block2 = it2->first - num_eliminate_blocks_;
+
+      int r, c, row_stride, col_stride;
+      CellInfo* cell_info = lhs->GetCell(block1, block2,
+                                         &r, &c,
+                                         &row_stride, &col_stride);
+      if (cell_info != NULL) {
+        const int block2_size = bs->cols[it2->first].size;
+        std::lock_guard<std::mutex> l(cell_info->m);
+        MatrixMatrixMultiply
+            <kFBlockSize, kEBlockSize, kEBlockSize, kFBlockSize, -1>(
+                b1_transpose_inverse_ete, block1_size, e_block_size,
+                buffer  + it2->second, e_block_size, block2_size,
+                cell_info->values, r, c, row_stride, col_stride);
+      }
+    }
+  }
+}
+
+// For rows with no e_blocks, the Schur complement update reduces to
+// S += F'F. This function iterates over the rows of A with no e_blocks
+// and calls NoEBlockRowOuterProduct on each such row.
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+void
+SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
+NoEBlockRowsUpdate(const BlockSparseMatrix* A,
+                   const double* b,
+                   int row_block_counter,
+                   BlockRandomAccessMatrix* lhs,
+                   double* rhs) {
+  const CompressedRowBlockStructure* bs = A->block_structure();
+  const double* values = A->values();
+  for (; row_block_counter < bs->rows.size(); ++row_block_counter) {
+    NoEBlockRowOuterProduct(A, row_block_counter, lhs);
+    if (!rhs) {
+      continue;
+    }
+    const CompressedRow& row = bs->rows[row_block_counter];
+    for (int c = 0; c < row.cells.size(); ++c) {
+      const int block_id = row.cells[c].block_id;
+      const int block_size = bs->cols[block_id].size;
+      const int block = block_id - num_eliminate_blocks_;
+      MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
+          values + row.cells[c].position, row.block.size, block_size,
+          b + row.block.position,
+          rhs + lhs_row_layout_[block]);
+    }
+  }
+}
+
+
+// A row r of A, which has no e_blocks gets added to the Schur
+// Complement as S += r r'. This function is responsible for computing
+// the contribution of a single row r to the Schur complement. It is
+// very similar in structure to EBlockRowOuterProduct except for
+// one difference. It does not use any of the template
+// parameters. This is because the algorithm used for detecting the
+// static structure of the matrix A only pays attention to rows with
+// e_blocks. This is because rows without e_blocks are rare and
+// typically arise from regularization terms in the original
+// optimization problem, and have a very different structure than the
+// rows with e_blocks. Including them in the static structure
+// detection will lead to most template parameters being set to
+// dynamic. Since the number of rows without e_blocks is small, the
+// lack of templating is not an issue.
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+void
+SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
+NoEBlockRowOuterProduct(const BlockSparseMatrix* A,
+                        int row_block_index,
+                        BlockRandomAccessMatrix* lhs) {
+  const CompressedRowBlockStructure* bs = A->block_structure();
+  const CompressedRow& row = bs->rows[row_block_index];
+  const double* values = A->values();
+  for (int i = 0; i < row.cells.size(); ++i) {
+    const int block1 = row.cells[i].block_id - num_eliminate_blocks_;
+    DCHECK_GE(block1, 0);
+
+    const int block1_size = bs->cols[row.cells[i].block_id].size;
+    int r, c, row_stride, col_stride;
+    CellInfo* cell_info = lhs->GetCell(block1, block1,
+                                       &r, &c,
+                                       &row_stride, &col_stride);
+    if (cell_info != NULL) {
+      std::lock_guard<std::mutex> l(cell_info->m);
+      // This multiply currently ignores the fact that this is a
+      // symmetric outer product.
+      MatrixTransposeMatrixMultiply
+          <Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>(
+              values + row.cells[i].position, row.block.size, block1_size,
+              values + row.cells[i].position, row.block.size, block1_size,
+              cell_info->values, r, c, row_stride, col_stride);
+    }
+
+    for (int j = i + 1; j < row.cells.size(); ++j) {
+      const int block2 = row.cells[j].block_id - num_eliminate_blocks_;
+      DCHECK_GE(block2, 0);
+      DCHECK_LT(block1, block2);
+      int r, c, row_stride, col_stride;
+      CellInfo* cell_info = lhs->GetCell(block1, block2,
+                                         &r, &c,
+                                         &row_stride, &col_stride);
+      if (cell_info != NULL) {
+        const int block2_size = bs->cols[row.cells[j].block_id].size;
+        std::lock_guard<std::mutex> l(cell_info->m);
+        MatrixTransposeMatrixMultiply
+            <Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>(
+                values + row.cells[i].position, row.block.size, block1_size,
+                values + row.cells[j].position, row.block.size, block2_size,
+                cell_info->values, r, c, row_stride, col_stride);
+      }
+    }
+  }
+}
+
+// For a row with an e_block, compute the contribution S += F'F. This
+// function has the same structure as NoEBlockRowOuterProduct, except
+// that this function uses the template parameters.
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+void
+SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
+EBlockRowOuterProduct(const BlockSparseMatrix* A,
+                      int row_block_index,
+                      BlockRandomAccessMatrix* lhs) {
+  const CompressedRowBlockStructure* bs = A->block_structure();
+  const CompressedRow& row = bs->rows[row_block_index];
+  const double* values = A->values();
+  for (int i = 1; i < row.cells.size(); ++i) {
+    const int block1 = row.cells[i].block_id - num_eliminate_blocks_;
+    DCHECK_GE(block1, 0);
+
+    const int block1_size = bs->cols[row.cells[i].block_id].size;
+    int r, c, row_stride, col_stride;
+    CellInfo* cell_info = lhs->GetCell(block1, block1,
+                                       &r, &c,
+                                       &row_stride, &col_stride);
+    if (cell_info != NULL) {
+      std::lock_guard<std::mutex> l(cell_info->m);
+      // block += b1.transpose() * b1;
+      MatrixTransposeMatrixMultiply
+          <kRowBlockSize, kFBlockSize, kRowBlockSize, kFBlockSize, 1>(
+          values + row.cells[i].position, row.block.size, block1_size,
+          values + row.cells[i].position, row.block.size, block1_size,
+          cell_info->values, r, c, row_stride, col_stride);
+    }
+
+    for (int j = i + 1; j < row.cells.size(); ++j) {
+      const int block2 = row.cells[j].block_id - num_eliminate_blocks_;
+      DCHECK_GE(block2, 0);
+      DCHECK_LT(block1, block2);
+      const int block2_size = bs->cols[row.cells[j].block_id].size;
+      int r, c, row_stride, col_stride;
+      CellInfo* cell_info = lhs->GetCell(block1, block2,
+                                         &r, &c,
+                                         &row_stride, &col_stride);
+      if (cell_info != NULL) {
+        // block += b1.transpose() * b2;
+        std::lock_guard<std::mutex> l(cell_info->m);
+        MatrixTransposeMatrixMultiply
+            <kRowBlockSize, kFBlockSize, kRowBlockSize, kFBlockSize, 1>(
+                values + row.cells[i].position, row.block.size, block1_size,
+                values + row.cells[j].position, row.block.size, block2_size,
+                cell_info->values, r, c, row_stride, col_stride);
+      }
+    }
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_SCHUR_ELIMINATOR_IMPL_H_
diff --git a/internal/ceres/schur_eliminator_template.py b/internal/ceres/schur_eliminator_template.py
new file mode 100644
index 0000000..2f38cf5
--- /dev/null
+++ b/internal/ceres/schur_eliminator_template.py
@@ -0,0 +1,155 @@
+# Ceres Solver - A fast non-linear least squares minimizer
+# Copyright 2017 Google Inc. All rights reserved.
+# http://ceres-solver.org/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+#   this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+# * Neither the name of Google Inc. nor the names of its contributors may be
+#   used to endorse or promote products derived from this software without
+#   specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# Author: sameeragarwal@google.com (Sameer Agarwal)
+#
+# Script for explicitly generating template specializations of the
+# SchurEliminator class. It is a rather large class and the number of
+# explicit instantiations is also large. Explicitly generating these
+# instantiations in separate .cc files breaks the compilation into
+# separate compilation units rather than one large .cc file, which
+# takes 2+GB of RAM to compile.
+#
+# This script creates two sets of files.
+#
+# 1. schur_eliminator_x_x_x.cc
+#    where the x's indicate the template parameters, and
+#
+# 2. schur_eliminator.cc
+#
+#    which contains a factory function for instantiating these classes
+#    based on runtime parameters.
+#
+# The list of tuples, specializations, indicates the set of
+# specializations that is generated.
+
+# Set of template specializations to generate
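+#
+# As a purely illustrative sketch (the actual list is presumably defined
+# in generate_template_specializations.py, which consumes the templates
+# below), such a list could look like
+#
+#   SPECIALIZATIONS = [(2, 2, 2),
+#                      (2, 2, 3),
+#                      (2, 3, 9),
+#                      (4, 4, "Eigen::Dynamic"),
+#                      ("Eigen::Dynamic", "Eigen::Dynamic", "Eigen::Dynamic")]
+#
+# with each tuple giving (row_block_size, e_block_size, f_block_size),
+# mirroring the cases handled by SchurEliminatorBase::Create in the
+# generated schur_eliminator.cc.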
+
+HEADER = """// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+"""
+
+DYNAMIC_FILE = """
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<%s, %s, %s>;
+
+}  // namespace internal
+}  // namespace ceres
+"""
+
+SPECIALIZATION_FILE = """
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<%s, %s, %s>;
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION
+"""
+
+FACTORY_FILE_HEADER = """
+#include "ceres/linear_solver.h"
+#include "ceres/schur_eliminator.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+SchurEliminatorBase*
+SchurEliminatorBase::Create(const LinearSolver::Options& options) {
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+"""
+
+FACTORY = """ return new SchurEliminator<%s, %s, %s>(options);"""
+
+FACTORY_FOOTER = """
+#endif
+  VLOG(1) << "Template specializations not found for <"
+          << options.row_block_size << ","
+          << options.e_block_size << ","
+          << options.f_block_size << ">";
+  return new SchurEliminator<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic>(options);
+}
+
+}  // namespace internal
+}  // namespace ceres
+"""
diff --git a/internal/ceres/schur_eliminator_test.cc b/internal/ceres/schur_eliminator_test.cc
new file mode 100644
index 0000000..2e8492f
--- /dev/null
+++ b/internal/ceres/schur_eliminator_test.cc
@@ -0,0 +1,230 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/schur_eliminator.h"
+
+#include <memory>
+#include "Eigen/Dense"
+#include "ceres/block_random_access_dense_matrix.h"
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/casts.h"
+#include "ceres/context_impl.h"
+#include "ceres/detect_structure.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/linear_least_squares_problems.h"
+#include "ceres/test_util.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+// TODO(sameeragarwal): Reduce the size of these tests and redo the
+// parameterization to be more efficient.
+
+namespace ceres {
+namespace internal {
+
+class SchurEliminatorTest : public ::testing::Test {
+ protected:
+  void SetUpFromId(int id) {
+    std::unique_ptr<LinearLeastSquaresProblem>
+        problem(CreateLinearLeastSquaresProblemFromId(id));
+    CHECK(problem != nullptr);
+    SetupHelper(problem.get());
+  }
+
+  void SetupHelper(LinearLeastSquaresProblem* problem) {
+    A.reset(down_cast<BlockSparseMatrix*>(problem->A.release()));
+    b.reset(problem->b.release());
+    D.reset(problem->D.release());
+
+    num_eliminate_blocks = problem->num_eliminate_blocks;
+    num_eliminate_cols = 0;
+    const CompressedRowBlockStructure* bs = A->block_structure();
+
+    for (int i = 0; i < num_eliminate_blocks; ++i) {
+      num_eliminate_cols += bs->cols[i].size;
+    }
+  }
+
+  // Compute the golden values for the reduced linear system and the
+  // solution to the linear least squares problem using dense linear
+  // algebra.
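+  //
+  // Writing H = [P, Q; Q', R] and g = [g_e; g_f], partitioned at
+  // num_eliminate_cols, the reduced (Schur complement) system is
+  //
+  //   lhs = R - Q' P^{-1} Q,   rhs = g_f - Q' P^{-1} g_e,
+  //
+  // and the full solution solves H x = g. P is assumed block diagonal
+  // in the e-blocks (as in the Schur eliminator), so it is inverted one
+  // block at a time below.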
+  void ComputeReferenceSolution(const Vector& D) {
+    Matrix J;
+    A->ToDenseMatrix(&J);
+    VectorRef f(b.get(), J.rows());
+
+    Matrix H = (D.cwiseProduct(D)).asDiagonal();
+    H.noalias() += J.transpose() * J;
+
+    const Vector g = J.transpose() * f;
+    const int schur_size = J.cols() - num_eliminate_cols;
+
+    lhs_expected.resize(schur_size, schur_size);
+    lhs_expected.setZero();
+
+    rhs_expected.resize(schur_size);
+    rhs_expected.setZero();
+
+    sol_expected.resize(J.cols());
+    sol_expected.setZero();
+
+    Matrix P = H.block(0, 0, num_eliminate_cols, num_eliminate_cols);
+    Matrix Q = H.block(0,
+                       num_eliminate_cols,
+                       num_eliminate_cols,
+                       schur_size);
+    Matrix R = H.block(num_eliminate_cols,
+                       num_eliminate_cols,
+                       schur_size,
+                       schur_size);
+    int row = 0;
+    const CompressedRowBlockStructure* bs = A->block_structure();
+    for (int i = 0; i < num_eliminate_blocks; ++i) {
+      const int block_size = bs->cols[i].size;
+      P.block(row, row, block_size, block_size) =
+          P.block(row, row, block_size, block_size)
+              .llt()
+              .solve(Matrix::Identity(block_size, block_size));
+      row += block_size;
+    }
+
+    lhs_expected
+        .triangularView<Eigen::Upper>() = R - Q.transpose() * P * Q;
+    rhs_expected =
+        g.tail(schur_size) - Q.transpose() * P * g.head(num_eliminate_cols);
+    sol_expected = H.llt().solve(g);
+  }
+
+  void EliminateSolveAndCompare(const VectorRef& diagonal,
+                                bool use_static_structure,
+                                const double relative_tolerance) {
+    const CompressedRowBlockStructure* bs = A->block_structure();
+    const int num_col_blocks = bs->cols.size();
+    std::vector<int> blocks(num_col_blocks - num_eliminate_blocks, 0);
+    for (int i = num_eliminate_blocks; i < num_col_blocks; ++i) {
+      blocks[i - num_eliminate_blocks] = bs->cols[i].size;
+    }
+
+    BlockRandomAccessDenseMatrix lhs(blocks);
+
+    const int num_cols = A->num_cols();
+    const int schur_size = lhs.num_rows();
+
+    Vector rhs(schur_size);
+
+    LinearSolver::Options options;
+    ContextImpl context;
+    options.context = &context;
+    options.elimination_groups.push_back(num_eliminate_blocks);
+    if (use_static_structure) {
+      DetectStructure(*bs,
+                      num_eliminate_blocks,
+                      &options.row_block_size,
+                      &options.e_block_size,
+                      &options.f_block_size);
+    }
+
+    std::unique_ptr<SchurEliminatorBase> eliminator;
+    eliminator.reset(SchurEliminatorBase::Create(options));
+    const bool kFullRankETE = true;
+    eliminator->Init(num_eliminate_blocks, kFullRankETE, A->block_structure());
+    eliminator->Eliminate(A.get(), b.get(), diagonal.data(), &lhs, rhs.data());
+
+    MatrixRef lhs_ref(lhs.mutable_values(), lhs.num_rows(), lhs.num_cols());
+    Vector reduced_sol =
+        lhs_ref.selfadjointView<Eigen::Upper>().llt().solve(rhs);
+
+    // Solution to the linear least squares problem.
+    Vector sol(num_cols);
+    sol.setZero();
+    sol.tail(schur_size) = reduced_sol;
+    eliminator->BackSubstitute(A.get(),
+                               b.get(),
+                               diagonal.data(),
+                               reduced_sol.data(),
+                               sol.data());
+
+    Matrix delta = (lhs_ref - lhs_expected).selfadjointView<Eigen::Upper>();
+    double diff = delta.norm();
+    EXPECT_NEAR(diff / lhs_expected.norm(), 0.0, relative_tolerance);
+    EXPECT_NEAR((rhs - rhs_expected).norm() / rhs_expected.norm(), 0.0,
+                relative_tolerance);
+    EXPECT_NEAR((sol - sol_expected).norm() / sol_expected.norm(), 0.0,
+                relative_tolerance);
+  }
+
+  std::unique_ptr<BlockSparseMatrix> A;
+  std::unique_ptr<double[]> b;
+  std::unique_ptr<double[]> D;
+  int num_eliminate_blocks;
+  int num_eliminate_cols;
+
+  Matrix lhs_expected;
+  Vector rhs_expected;
+  Vector sol_expected;
+};
+
+TEST_F(SchurEliminatorTest, ScalarProblemNoRegularization) {
+  SetUpFromId(2);
+  Vector zero(A->num_cols());
+  zero.setZero();
+
+  ComputeReferenceSolution(VectorRef(zero.data(), A->num_cols()));
+  EliminateSolveAndCompare(VectorRef(zero.data(), A->num_cols()), true, 1e-14);
+  EliminateSolveAndCompare(VectorRef(zero.data(), A->num_cols()), false, 1e-14);
+}
+
+TEST_F(SchurEliminatorTest, ScalarProblemWithRegularization) {
+  SetUpFromId(2);
+  ComputeReferenceSolution(VectorRef(D.get(), A->num_cols()));
+  EliminateSolveAndCompare(VectorRef(D.get(), A->num_cols()), true, 1e-14);
+  EliminateSolveAndCompare(VectorRef(D.get(), A->num_cols()), false, 1e-14);
+}
+
+TEST_F(SchurEliminatorTest, VaryingFBlockSizeWithStaticStructure) {
+  SetUpFromId(4);
+  ComputeReferenceSolution(VectorRef(D.get(), A->num_cols()));
+  EliminateSolveAndCompare(VectorRef(D.get(), A->num_cols()), true, 1e-14);
+}
+
+TEST_F(SchurEliminatorTest, VaryingFBlockSizeWithoutStaticStructure) {
+  SetUpFromId(4);
+  ComputeReferenceSolution(VectorRef(D.get(), A->num_cols()));
+  EliminateSolveAndCompare(VectorRef(D.get(), A->num_cols()), false, 1e-14);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/schur_jacobi_preconditioner.cc b/internal/ceres/schur_jacobi_preconditioner.cc
new file mode 100644
index 0000000..1500650
--- /dev/null
+++ b/internal/ceres/schur_jacobi_preconditioner.cc
@@ -0,0 +1,107 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/schur_jacobi_preconditioner.h"
+
+#include <utility>
+#include <vector>
+
+#include "ceres/block_random_access_diagonal_matrix.h"
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/linear_solver.h"
+#include "ceres/schur_eliminator.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+SchurJacobiPreconditioner::SchurJacobiPreconditioner(
+    const CompressedRowBlockStructure& bs,
+    const Preconditioner::Options& options)
+    : options_(options) {
+  CHECK_GT(options_.elimination_groups.size(), 1);
+  CHECK_GT(options_.elimination_groups[0], 0);
+  const int num_blocks = bs.cols.size() - options_.elimination_groups[0];
+  CHECK_GT(num_blocks, 0)
+      << "Jacobian should have at least 1 f_block for "
+      << "SCHUR_JACOBI preconditioner.";
+  CHECK(options_.context != NULL);
+
+  std::vector<int> blocks(num_blocks);
+  for (int i = 0; i < num_blocks; ++i) {
+    blocks[i] = bs.cols[i + options_.elimination_groups[0]].size;
+  }
+
+  m_.reset(new BlockRandomAccessDiagonalMatrix(blocks));
+  InitEliminator(bs);
+}
+
+SchurJacobiPreconditioner::~SchurJacobiPreconditioner() {
+}
+
+// Initialize the SchurEliminator.
+void SchurJacobiPreconditioner::InitEliminator(
+    const CompressedRowBlockStructure& bs) {
+  LinearSolver::Options eliminator_options;
+  eliminator_options.elimination_groups = options_.elimination_groups;
+  eliminator_options.num_threads = options_.num_threads;
+  eliminator_options.e_block_size = options_.e_block_size;
+  eliminator_options.f_block_size = options_.f_block_size;
+  eliminator_options.row_block_size = options_.row_block_size;
+  eliminator_options.context = options_.context;
+  eliminator_.reset(SchurEliminatorBase::Create(eliminator_options));
+  const bool kFullRankETE = true;
+  eliminator_->Init(
+      eliminator_options.elimination_groups[0], kFullRankETE, &bs);
+}
+
+// Update the values of the preconditioner matrix and invert it.
+bool SchurJacobiPreconditioner::UpdateImpl(const BlockSparseMatrix& A,
+                                           const double* D) {
+  const int num_rows = m_->num_rows();
+  CHECK_GT(num_rows, 0);
+
+  // Compute a subset of the entries of the Schur complement.
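+  //
+  // Passing m_, a BlockRandomAccessDiagonalMatrix, as the output
+  // restricts the computation to the block diagonal of the Schur
+  // complement; b and the rhs output are nullptr since only the matrix
+  // is needed by the preconditioner.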
+  eliminator_->Eliminate(&A, nullptr, D, m_.get(), nullptr);
+  m_->Invert();
+  return true;
+}
+
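+// Apply the preconditioner, i.e. multiply by the inverted block diagonal
+// of the Schur complement computed in UpdateImpl.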
+void SchurJacobiPreconditioner::RightMultiply(const double* x,
+                                              double* y) const {
+  m_->RightMultiply(x, y);
+}
+
+int SchurJacobiPreconditioner::num_rows() const {
+  return m_->num_rows();
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/schur_jacobi_preconditioner.h b/internal/ceres/schur_jacobi_preconditioner.h
new file mode 100644
index 0000000..c95468f
--- /dev/null
+++ b/internal/ceres/schur_jacobi_preconditioner.h
@@ -0,0 +1,107 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Detailed descriptions of these preconditioners beyond what is
+// documented here can be found in
+//
+// Bundle Adjustment in the Large
+// S. Agarwal, N. Snavely, S. Seitz & R. Szeliski, ECCV 2010
+// http://www.cs.washington.edu/homes/sagarwal/bal.pdf
+
+#ifndef CERES_INTERNAL_SCHUR_JACOBI_PRECONDITIONER_H_
+#define CERES_INTERNAL_SCHUR_JACOBI_PRECONDITIONER_H_
+
+#include <memory>
+#include <set>
+#include <utility>
+#include <vector>
+
+#include "ceres/preconditioner.h"
+
+namespace ceres {
+namespace internal {
+
+class BlockRandomAccessDiagonalMatrix;
+class BlockSparseMatrix;
+struct CompressedRowBlockStructure;
+class SchurEliminatorBase;
+
+// This class implements the SCHUR_JACOBI preconditioner for Structure
+// from Motion/Bundle Adjustment problems. Full mathematical details
+// can be found in
+//
+// Bundle Adjustment in the Large
+// S. Agarwal, N. Snavely, S. Seitz & R. Szeliski, ECCV 2010
+// http://www.cs.washington.edu/homes/sagarwal/bal.pdf
+//
+// Example usage:
+//
+//   Preconditioner::Options options;
+//   options.preconditioner_type = SCHUR_JACOBI;
+//   options.elimination_groups.push_back(num_points);
+//   options.elimination_groups.push_back(num_cameras);
+//   SchurJacobiPreconditioner preconditioner(
+//      *A.block_structure(), options);
+//   preconditioner.Update(A, NULL);
+//   preconditioner.RightMultiply(x, y);
+//
+class SchurJacobiPreconditioner : public BlockSparseMatrixPreconditioner {
+ public:
+  // Initialize the symbolic structure of the preconditioner. bs is
+  // the block structure of the linear system to be solved. It is used
+  // to determine the sparsity structure of the preconditioner matrix.
+  //
+  // It has the same structural requirement as other Schur complement
+  // based solvers. Please see schur_eliminator.h for more details.
+  SchurJacobiPreconditioner(const CompressedRowBlockStructure& bs,
+                            const Preconditioner::Options& options);
+  SchurJacobiPreconditioner(const SchurJacobiPreconditioner&) = delete;
+  void operator=(const SchurJacobiPreconditioner&) = delete;
+
+  virtual ~SchurJacobiPreconditioner();
+
+  // Preconditioner interface.
+  virtual void RightMultiply(const double* x, double* y) const;
+  virtual int num_rows() const;
+
+ private:
+  void InitEliminator(const CompressedRowBlockStructure& bs);
+  virtual bool UpdateImpl(const BlockSparseMatrix& A, const double* D);
+
+  Preconditioner::Options options_;
+  std::unique_ptr<SchurEliminatorBase> eliminator_;
+  // Preconditioner matrix.
+  std::unique_ptr<BlockRandomAccessDiagonalMatrix> m_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_SCHUR_JACOBI_PRECONDITIONER_H_
diff --git a/internal/ceres/schur_templates.cc b/internal/ceres/schur_templates.cc
new file mode 100644
index 0000000..64bc9f3
--- /dev/null
+++ b/internal/ceres/schur_templates.cc
@@ -0,0 +1,219 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// What template specializations are available.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+#include "ceres/internal/eigen.h"
+#include "ceres/schur_templates.h"
+
+namespace ceres {
+namespace internal {
+
+void GetBestSchurTemplateSpecialization(int* row_block_size,
+                                        int* e_block_size,
+                                        int* f_block_size) {
+  LinearSolver::Options options;
+  options.row_block_size = *row_block_size;
+  options.e_block_size = *e_block_size;
+  options.f_block_size = *f_block_size;
+  *row_block_size = Eigen::Dynamic;
+  *e_block_size = Eigen::Dynamic;
+  *f_block_size = Eigen::Dynamic;
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 2) &&
+     (options.f_block_size == 2)) {
+   *row_block_size = 2;
+   *e_block_size = 2;
+   *f_block_size = 2;
+  return;
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 2) &&
+     (options.f_block_size == 3)) {
+   *row_block_size = 2;
+   *e_block_size = 2;
+   *f_block_size = 3;
+  return;
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 2) &&
+     (options.f_block_size == 4)) {
+   *row_block_size = 2;
+   *e_block_size = 2;
+   *f_block_size = 4;
+  return;
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 2)) {
+   *row_block_size = 2;
+   *e_block_size = 2;
+   *f_block_size = Eigen::Dynamic;
+  return;
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 3) &&
+     (options.f_block_size == 3)) {
+   *row_block_size = 2;
+   *e_block_size = 3;
+   *f_block_size = 3;
+  return;
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 3) &&
+     (options.f_block_size == 4)) {
+   *row_block_size = 2;
+   *e_block_size = 3;
+   *f_block_size = 4;
+  return;
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 3) &&
+     (options.f_block_size == 6)) {
+   *row_block_size = 2;
+   *e_block_size = 3;
+   *f_block_size = 6;
+  return;
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 3) &&
+     (options.f_block_size == 9)) {
+   *row_block_size = 2;
+   *e_block_size = 3;
+   *f_block_size = 9;
+  return;
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 3)) {
+   *row_block_size = 2;
+   *e_block_size = 3;
+   *f_block_size = Eigen::Dynamic;
+  return;
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 4) &&
+     (options.f_block_size == 3)) {
+   *row_block_size = 2;
+   *e_block_size = 4;
+   *f_block_size = 3;
+  return;
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 4) &&
+     (options.f_block_size == 4)) {
+   *row_block_size = 2;
+   *e_block_size = 4;
+   *f_block_size = 4;
+  return;
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 4) &&
+     (options.f_block_size == 6)) {
+   *row_block_size = 2;
+   *e_block_size = 4;
+   *f_block_size = 6;
+  return;
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 4) &&
+     (options.f_block_size == 8)) {
+   *row_block_size = 2;
+   *e_block_size = 4;
+   *f_block_size = 8;
+  return;
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 4) &&
+     (options.f_block_size == 9)) {
+   *row_block_size = 2;
+   *e_block_size = 4;
+   *f_block_size = 9;
+  return;
+ }
+ if ((options.row_block_size == 2) &&
+     (options.e_block_size == 4)) {
+   *row_block_size = 2;
+   *e_block_size = 4;
+   *f_block_size = Eigen::Dynamic;
+  return;
+ }
+ if (options.row_block_size == 2){
+   *row_block_size = 2;
+   *e_block_size = Eigen::Dynamic;
+   *f_block_size = Eigen::Dynamic;
+  return;
+ }
+ if ((options.row_block_size == 4) &&
+     (options.e_block_size == 4) &&
+     (options.f_block_size == 2)) {
+   *row_block_size = 4;
+   *e_block_size = 4;
+   *f_block_size = 2;
+  return;
+ }
+ if ((options.row_block_size == 4) &&
+     (options.e_block_size == 4) &&
+     (options.f_block_size == 3)) {
+   *row_block_size = 4;
+   *e_block_size = 4;
+   *f_block_size = 3;
+  return;
+ }
+ if ((options.row_block_size == 4) &&
+     (options.e_block_size == 4) &&
+     (options.f_block_size == 4)) {
+   *row_block_size = 4;
+   *e_block_size = 4;
+   *f_block_size = 4;
+  return;
+ }
+ if ((options.row_block_size == 4) &&
+     (options.e_block_size == 4)) {
+   *row_block_size = 4;
+   *e_block_size = 4;
+   *f_block_size = Eigen::Dynamic;
+  return;
+ }
+
+#endif
+  return;
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/schur_templates.h b/internal/ceres/schur_templates.h
new file mode 100644
index 0000000..90aee0a
--- /dev/null
+++ b/internal/ceres/schur_templates.h
@@ -0,0 +1,46 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+
+#ifndef CERES_INTERNAL_SCHUR_TEMPLATES_H_
+#define CERES_INTERNAL_SCHUR_TEMPLATES_H_
+
+#include "ceres/linear_solver.h"
+
+namespace ceres {
+namespace internal {
+
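+// Given the desired row, e and f block sizes, reports back, in place,
+// the block sizes of the best matching SchurEliminator template
+// specialization; block sizes for which no specialization is available
+// are set to Eigen::Dynamic.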
+void GetBestSchurTemplateSpecialization(int* row_block_size,
+                                        int* e_block_size,
+                                        int* f_block_size);
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_SCHUR_TEMPLATES_H_
diff --git a/internal/ceres/scoped_thread_token.h b/internal/ceres/scoped_thread_token.h
new file mode 100644
index 0000000..c167397
--- /dev/null
+++ b/internal/ceres/scoped_thread_token.h
@@ -0,0 +1,61 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: yp@photonscore.de (Yury Prokazov)
+
+#ifndef CERES_INTERNAL_SCOPED_THREAD_TOKEN_H_
+#define CERES_INTERNAL_SCOPED_THREAD_TOKEN_H_
+
+#include "ceres/thread_token_provider.h"
+
+namespace ceres {
+namespace internal {
+
+// Helper class for ThreadTokenProvider. This object acquires a token in its
+// constructor and returns it to the provider in its destructor.
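+//
+// Example usage (a sketch, given a ThreadTokenProvider* named provider):
+//
+//   {
+//     ScopedThreadToken scoped_token(provider);
+//     const int thread_id = scoped_token.token();
+//     ... use thread_id to index per-thread scratch storage ...
+//   }  // The token is returned to the provider here.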
+class ScopedThreadToken {
+ public:
+  ScopedThreadToken(ThreadTokenProvider* provider)
+      : provider_(provider), token_(provider->Acquire()) {}
+
+  ~ScopedThreadToken() { provider_->Release(token_); }
+
+  int token() const { return token_; }
+
+ private:
+  ThreadTokenProvider* provider_;
+  int token_;
+
+  ScopedThreadToken(ScopedThreadToken&);
+  ScopedThreadToken& operator=(ScopedThreadToken&);
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_SCOPED_THREAD_TOKEN_H_
diff --git a/internal/ceres/scratch_evaluate_preparer.cc b/internal/ceres/scratch_evaluate_preparer.cc
new file mode 100644
index 0000000..f01ef11
--- /dev/null
+++ b/internal/ceres/scratch_evaluate_preparer.cc
@@ -0,0 +1,78 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include "ceres/scratch_evaluate_preparer.h"
+
+#include "ceres/parameter_block.h"
+#include "ceres/program.h"
+#include "ceres/residual_block.h"
+
+namespace ceres {
+namespace internal {
+
+ScratchEvaluatePreparer* ScratchEvaluatePreparer::Create(
+    const Program &program,
+    int num_threads) {
+  ScratchEvaluatePreparer* preparers = new ScratchEvaluatePreparer[num_threads];
+  int max_derivatives_per_residual_block =
+      program.MaxDerivativesPerResidualBlock();
+  for (int i = 0; i < num_threads; i++) {
+    preparers[i].Init(max_derivatives_per_residual_block);
+  }
+  return preparers;
+}
+
+void ScratchEvaluatePreparer::Init(int max_derivatives_per_residual_block) {
+  jacobian_scratch_.reset(
+      new double[max_derivatives_per_residual_block]);
+}
+
+// Point the jacobian blocks into the scratch area of this evaluate preparer.
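+//
+// Each non-constant parameter block j gets num_residuals * LocalSize(j)
+// doubles, packed one after another in the scratch buffer; constant
+// parameter blocks get a NULL jacobian pointer so that their jacobians
+// are not evaluated.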
+void ScratchEvaluatePreparer::Prepare(const ResidualBlock* residual_block,
+                                      int /* residual_block_index */,
+                                      SparseMatrix* /* jacobian */,
+                                      double** jacobians) {
+  double* jacobian_block_cursor = jacobian_scratch_.get();
+  int num_residuals = residual_block->NumResiduals();
+  int num_parameter_blocks = residual_block->NumParameterBlocks();
+  for (int j = 0; j < num_parameter_blocks; ++j) {
+    const ParameterBlock* parameter_block =
+        residual_block->parameter_blocks()[j];
+    if (parameter_block->IsConstant()) {
+      jacobians[j] = NULL;
+    } else {
+      jacobians[j] = jacobian_block_cursor;
+      jacobian_block_cursor += num_residuals * parameter_block->LocalSize();
+    }
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/scratch_evaluate_preparer.h b/internal/ceres/scratch_evaluate_preparer.h
new file mode 100644
index 0000000..c8d9b93
--- /dev/null
+++ b/internal/ceres/scratch_evaluate_preparer.h
@@ -0,0 +1,69 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// A scratch evaluate preparer provides temporary storage for the jacobians that
+// are created when running user-provided cost functions. The evaluator takes
+// care to avoid evaluating the jacobian for fixed parameters.
+
+#ifndef CERES_INTERNAL_SCRATCH_EVALUATE_PREPARER_H_
+#define CERES_INTERNAL_SCRATCH_EVALUATE_PREPARER_H_
+
+#include <memory>
+
+namespace ceres {
+namespace internal {
+
+class Program;
+class ResidualBlock;
+class SparseMatrix;
+
+class ScratchEvaluatePreparer {
+ public:
+  // Create num_threads ScratchEvaluatePreparers.
+  static ScratchEvaluatePreparer* Create(const Program &program,
+                                         int num_threads);
+
+  // EvaluatePreparer interface
+  void Init(int max_derivatives_per_residual_block);
+  void Prepare(const ResidualBlock* residual_block,
+               int residual_block_index,
+               SparseMatrix* jacobian,
+               double** jacobians);
+
+ private:
+  // Scratch space for the jacobians; the jacobians are packed one after
+  // another. There is enough scratch to hold all the jacobians for the
+  // largest residual block.
+  std::unique_ptr<double[]> jacobian_scratch_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_SCRATCH_EVALUATE_PREPARER_H_
diff --git a/internal/ceres/single_linkage_clustering.cc b/internal/ceres/single_linkage_clustering.cc
new file mode 100644
index 0000000..394492c
--- /dev/null
+++ b/internal/ceres/single_linkage_clustering.cc
@@ -0,0 +1,94 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/single_linkage_clustering.h"
+
+#include <unordered_map>
+#include <unordered_set>
+#include "ceres/graph.h"
+#include "ceres/graph_algorithms.h"
+
+namespace ceres {
+namespace internal {
+
+int ComputeSingleLinkageClustering(
+    const SingleLinkageClusteringOptions& options,
+    const WeightedGraph<int>& graph,
+    std::unordered_map<int, int>* membership) {
+  CHECK(membership != nullptr);
+  membership->clear();
+
+  // Initially each vertex is in its own cluster.
+  const std::unordered_set<int>& vertices = graph.vertices();
+  for (const int v : vertices) {
+    (*membership)[v] = v;
+  }
+
+  for (const int vertex1 : vertices) {
+    const std::unordered_set<int>& neighbors = graph.Neighbors(vertex1);
+    for (const int vertex2 : neighbors) {
+      // Since the graph is undirected, only pay attention to one side
+      // of the edge and ignore weak edges.
+      if ((vertex1 > vertex2) ||
+          (graph.EdgeWeight(vertex1, vertex2) < options.min_similarity)) {
+        continue;
+      }
+
+      // Use a union-find algorithm to keep track of the clusters.
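+      // FindConnectedComponent returns the representative vertex of the
+      // cluster containing its argument. Merging always points the
+      // larger representative at the smaller one, so each cluster ends
+      // up identified by its smallest vertex id.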
+      const int c1 = FindConnectedComponent(vertex1, membership);
+      const int c2 = FindConnectedComponent(vertex2, membership);
+
+      if (c1 == c2) {
+        continue;
+      }
+
+      if (c1 < c2) {
+        (*membership)[c2] = c1;
+      } else {
+        (*membership)[c1] = c2;
+      }
+    }
+  }
+
+  // Make sure that every vertex is connected directly to the vertex
+  // identifying the cluster.
+  int num_clusters = 0;
+  for (auto& m : *membership) {
+    m.second = FindConnectedComponent(m.first, membership);
+    if (m.first == m.second) {
+      ++num_clusters;
+    }
+  }
+
+  return num_clusters;
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/single_linkage_clustering.h b/internal/ceres/single_linkage_clustering.h
new file mode 100644
index 0000000..ccd6f8e
--- /dev/null
+++ b/internal/ceres/single_linkage_clustering.h
@@ -0,0 +1,64 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_SINGLE_LINKAGE_CLUSTERING_H_
+#define CERES_INTERNAL_SINGLE_LINKAGE_CLUSTERING_H_
+
+#include <unordered_map>
+#include "ceres/graph.h"
+
+namespace ceres {
+namespace internal {
+
+struct SingleLinkageClusteringOptions {
+  // Graph edges with edge weight less than min_similarity are ignored
+  // during the clustering process.
+  double min_similarity = 0.99;
+};
+
+// Compute a partitioning of the vertices of the graph using the
+// single linkage clustering algorithm. Edges with weight less than
+// SingleLinkageClusteringOptions::min_similarity will be ignored.
+//
+// Upon return, membership will contain a mapping from each vertex of
+// the graph to an integer identifying the cluster to which it belongs.
+//
+// The return value of this function is the number of clusters
+// identified by the algorithm.
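+//
+// Example usage (a sketch):
+//
+//   WeightedGraph<int> graph;
+//   ... add vertices with AddVertex() and weighted edges with AddEdge() ...
+//   SingleLinkageClusteringOptions options;
+//   std::unordered_map<int, int> membership;
+//   const int num_clusters =
+//       ComputeSingleLinkageClustering(options, graph, &membership);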
+int ComputeSingleLinkageClustering(
+    const SingleLinkageClusteringOptions& options,
+    const WeightedGraph<int>& graph,
+    std::unordered_map<int, int>* membership);
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_SINGLE_LINKAGE_CLUSTERING_H_
diff --git a/internal/ceres/single_linkage_clustering_test.cc b/internal/ceres/single_linkage_clustering_test.cc
new file mode 100644
index 0000000..281c281
--- /dev/null
+++ b/internal/ceres/single_linkage_clustering_test.cc
@@ -0,0 +1,125 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Sameer Agarwal (sameeragarwal@google.com)
+
+#include "ceres/single_linkage_clustering.h"
+
+#include <unordered_map>
+#include "ceres/graph.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+TEST(SingleLinkageClustering, GraphHasTwoComponents) {
+  WeightedGraph<int> graph;
+  const int kNumVertices = 6;
+  for (int i = 0; i < kNumVertices; ++i) {
+    graph.AddVertex(i);
+  }
+  // Graph structure:
+  //
+  //  0-1-2-3 4-5
+  graph.AddEdge(0, 1, 1.0);
+  graph.AddEdge(1, 2, 1.0);
+  graph.AddEdge(2, 3, 1.0);
+  graph.AddEdge(4, 5, 1.0);
+
+  SingleLinkageClusteringOptions options;
+  std::unordered_map<int, int> membership;
+  ComputeSingleLinkageClustering(options, graph, &membership);
+  EXPECT_EQ(membership.size(), kNumVertices);
+
+  EXPECT_EQ(membership[1], membership[0]);
+  EXPECT_EQ(membership[2], membership[0]);
+  EXPECT_EQ(membership[3], membership[0]);
+  EXPECT_NE(membership[4], membership[0]);
+  EXPECT_NE(membership[5], membership[0]);
+  EXPECT_EQ(membership[4], membership[5]);
+}
+
+TEST(SingleLinkageClustering, ComponentWithWeakLink) {
+  WeightedGraph<int> graph;
+  const int kNumVertices = 6;
+  for (int i = 0; i < kNumVertices; ++i) {
+    graph.AddVertex(i);
+  }
+  // Graph structure:
+  //
+  //  0-1-2-3 4-5
+  graph.AddEdge(0, 1, 1.0);
+  graph.AddEdge(1, 2, 1.0);
+  graph.AddEdge(2, 3, 1.0);
+
+  // This component should break up into two.
+  graph.AddEdge(4, 5, 0.5);
+
+  SingleLinkageClusteringOptions options;
+  std::unordered_map<int, int> membership;
+  ComputeSingleLinkageClustering(options, graph, &membership);
+  EXPECT_EQ(membership.size(), kNumVertices);
+
+  EXPECT_EQ(membership[1], membership[0]);
+  EXPECT_EQ(membership[2], membership[0]);
+  EXPECT_EQ(membership[3], membership[0]);
+  EXPECT_NE(membership[4], membership[0]);
+  EXPECT_NE(membership[5], membership[0]);
+  EXPECT_NE(membership[4], membership[5]);
+}
+
+TEST(SingleLinkageClustering, ComponentWithWeakLinkAndStrongLink) {
+  WeightedGraph<int> graph;
+  const int kNumVertices = 6;
+  for (int i = 0; i < kNumVertices; ++i) {
+    graph.AddVertex(i);
+  }
+  // Graph structure:
+  //
+  //  0-1-2-3-0 4-5  (the 2-3 edge is weak; 0-3 closes the cycle)
+  graph.AddEdge(0, 1, 1.0);
+  graph.AddEdge(1, 2, 1.0);
+  graph.AddEdge(2, 3, 0.5);  // Weak link
+  graph.AddEdge(0, 3, 1.0);
+
+  // The strong 4-5 edge keeps this component in a single cluster.
+  graph.AddEdge(4, 5, 1.0);
+
+  SingleLinkageClusteringOptions options;
+  std::unordered_map<int, int> membership;
+  ComputeSingleLinkageClustering(options, graph, &membership);
+  EXPECT_EQ(membership.size(), kNumVertices);
+
+  EXPECT_EQ(membership[1], membership[0]);
+  EXPECT_EQ(membership[2], membership[0]);
+  EXPECT_EQ(membership[3], membership[0]);
+  EXPECT_EQ(membership[4], membership[5]);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/small_blas.h b/internal/ceres/small_blas.h
new file mode 100644
index 0000000..81c5872
--- /dev/null
+++ b/internal/ceres/small_blas.h
@@ -0,0 +1,555 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Simple BLAS functions for use in the Schur Eliminator. These are
+// fairly basic implementations which already yield a significant
+// speedup in the eliminator performance.
+
+#ifndef CERES_INTERNAL_SMALL_BLAS_H_
+#define CERES_INTERNAL_SMALL_BLAS_H_
+
+#include "ceres/internal/port.h"
+#include "ceres/internal/eigen.h"
+#include "glog/logging.h"
+#include "small_blas_generic.h"
+
+namespace ceres {
+namespace internal {
+
+// The following three macros are used to share code and reduce
+// template junk across the various GEMM variants.
+#define CERES_GEMM_BEGIN(name)                                          \
+  template<int kRowA, int kColA, int kRowB, int kColB, int kOperation>  \
+  inline void name(const double* A,                                     \
+                   const int num_row_a,                                 \
+                   const int num_col_a,                                 \
+                   const double* B,                                     \
+                   const int num_row_b,                                 \
+                   const int num_col_b,                                 \
+                   double* C,                                           \
+                   const int start_row_c,                               \
+                   const int start_col_c,                               \
+                   const int row_stride_c,                              \
+                   const int col_stride_c)
+
+#define CERES_GEMM_NAIVE_HEADER                                         \
+  DCHECK_GT(num_row_a, 0);                                              \
+  DCHECK_GT(num_col_a, 0);                                              \
+  DCHECK_GT(num_row_b, 0);                                              \
+  DCHECK_GT(num_col_b, 0);                                              \
+  DCHECK_GE(start_row_c, 0);                                            \
+  DCHECK_GE(start_col_c, 0);                                            \
+  DCHECK_GT(row_stride_c, 0);                                           \
+  DCHECK_GT(col_stride_c, 0);                                           \
+  DCHECK((kRowA == Eigen::Dynamic) || (kRowA == num_row_a));            \
+  DCHECK((kColA == Eigen::Dynamic) || (kColA == num_col_a));            \
+  DCHECK((kRowB == Eigen::Dynamic) || (kRowB == num_row_b));            \
+  DCHECK((kColB == Eigen::Dynamic) || (kColB == num_col_b));            \
+  const int NUM_ROW_A = (kRowA != Eigen::Dynamic ? kRowA : num_row_a);  \
+  const int NUM_COL_A = (kColA != Eigen::Dynamic ? kColA : num_col_a);  \
+  const int NUM_ROW_B = (kRowB != Eigen::Dynamic ? kRowB : num_row_b);  \
+  const int NUM_COL_B = (kColB != Eigen::Dynamic ? kColB : num_col_b);
+
+#define CERES_GEMM_EIGEN_HEADER                                         \
+  const typename EigenTypes<kRowA, kColA>::ConstMatrixRef               \
+  Aref(A, num_row_a, num_col_a);                                        \
+  const typename EigenTypes<kRowB, kColB>::ConstMatrixRef               \
+  Bref(B, num_row_b, num_col_b);                                        \
+  MatrixRef Cref(C, row_stride_c, col_stride_c);                        \
+
+#define CERES_CALL_GEMM(name)                                           \
+  name<kRowA, kColA, kRowB, kColB, kOperation>(                         \
+      A, num_row_a, num_col_a,                                          \
+      B, num_row_b, num_col_b,                                          \
+      C, start_row_c, start_col_c, row_stride_c, col_stride_c);
+
+#define CERES_GEMM_STORE_SINGLE(p, index, value)                        \
+  if (kOperation > 0) {                                                 \
+    p[index] += value;                                                  \
+  } else if (kOperation < 0) {                                          \
+    p[index] -= value;                                                  \
+  } else {                                                              \
+    p[index] = value;                                                   \
+  }
+
+#define CERES_GEMM_STORE_PAIR(p, index, v1, v2)                         \
+  if (kOperation > 0) {                                                 \
+    p[index] += v1;                                                     \
+    p[index + 1] += v2;                                                 \
+  } else if (kOperation < 0) {                                          \
+    p[index] -= v1;                                                     \
+    p[index + 1] -= v2;                                                 \
+  } else {                                                              \
+    p[index] = v1;                                                      \
+    p[index + 1] = v2;                                                  \
+  }
+
+// For the matrix-matrix functions below, there are three variants of
+// each function: Foo, FooNaive and FooEigen. Foo is the one to be
+// called by the user. FooNaive is a basic loop-based implementation
+// and FooEigen uses Eigen's implementation. Foo chooses between
+// FooNaive and FooEigen depending on how many of the template
+// arguments are fixed at compile time: currently, FooEigen is called
+// if all matrix dimensions are compile-time constants and FooNaive is
+// called otherwise, which gives the best performance.
+//
+// The MatrixMatrixMultiply variants compute:
+//
+//   C op A * B;
+//
+// The MatrixTransposeMatrixMultiply variants compute:
+//
+//   C op A' * B
+//
+// where op can be +=, -=, or =.
+//
+// The template parameters (kRowA, kColA, kRowB, kColB) allow
+// specialization of the loop at compile time. If this information is
+// not available, then Eigen::Dynamic should be used as the template
+// argument.
+//
+//   kOperation =  1  -> C += A * B
+//   kOperation = -1  -> C -= A * B
+//   kOperation =  0  -> C  = A * B
+//
+// The functions can write into matrices C which are larger than the
+// matrix A * B. This is done by specifying the true size of C via
+// row_stride_c and col_stride_c, and then indicating where A * B
+// should be written into by start_row_c and start_col_c.
+//
+// Graphically if row_stride_c = 10, col_stride_c = 12, start_row_c =
+// 4 and start_col_c = 5, then if A = 3x2 and B = 2x4, we get
+//
+//   ------------
+//   ------------
+//   ------------
+//   ------------
+//   -----xxxx---
+//   -----xxxx---
+//   -----xxxx---
+//   ------------
+//   ------------
+//   ------------
+//
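+// For the example above, with all sizes known at compile time and
+// kOperation = 1 (i.e. C += A * B), a call would look like (a sketch):
+//
+//   MatrixMatrixMultiply<3, 2, 2, 4, 1>(
+//       A, 3, 2, B, 2, 4, C, 4, 5, 10, 12);
+//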
+CERES_GEMM_BEGIN(MatrixMatrixMultiplyEigen) {
+  CERES_GEMM_EIGEN_HEADER
+  Eigen::Block<MatrixRef, kRowA, kColB>
+    block(Cref, start_row_c, start_col_c, num_row_a, num_col_b);
+
+  if (kOperation > 0) {
+    block.noalias() += Aref * Bref;
+  } else if (kOperation < 0) {
+    block.noalias() -= Aref * Bref;
+  } else {
+    block.noalias() = Aref * Bref;
+  }
+}
+
+CERES_GEMM_BEGIN(MatrixMatrixMultiplyNaive) {
+  CERES_GEMM_NAIVE_HEADER
+  DCHECK_EQ(NUM_COL_A, NUM_ROW_B);
+
+  const int NUM_ROW_C = NUM_ROW_A;
+  const int NUM_COL_C = NUM_COL_B;
+  DCHECK_LE(start_row_c + NUM_ROW_C, row_stride_c);
+  DCHECK_LE(start_col_c + NUM_COL_C, col_stride_c);
+  const int span = 4;
+
+  // Calculate the remainder part first.
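+  //
+  // Columns of C are produced span (= 4) at a time; a leftover single
+  // column and/or pair of columns is handled up front so that the main
+  // loop at the end only deals with multiples of span.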
+
+  // Process the last odd column if present.
+  if (NUM_COL_C & 1) {
+    int col = NUM_COL_C - 1;
+    const double* pa = &A[0];
+    for (int row = 0; row < NUM_ROW_C; ++row, pa += NUM_COL_A) {
+      const double* pb = &B[col];
+      double tmp = 0.0;
+      for (int k = 0; k < NUM_COL_A; ++k, pb += NUM_COL_B) {
+        tmp += pa[k] * pb[0];
+      }
+
+      const int index = (row + start_row_c) * col_stride_c + start_col_c + col;
+      CERES_GEMM_STORE_SINGLE(C, index, tmp);
+    }
+
+    // Return early for efficiency when multiplying extremely small matrices.
+    if (NUM_COL_C == 1) {
+      return;
+    }
+  }
+
+  // Process the remaining pair of columns, if present.
+  if (NUM_COL_C & 2) {
+    int col = NUM_COL_C & (int)(~(span - 1));
+    const double* pa = &A[0];
+    for (int row = 0; row < NUM_ROW_C; ++row, pa += NUM_COL_A) {
+      const double* pb = &B[col];
+      double tmp1 = 0.0, tmp2 = 0.0;
+      for (int k = 0; k < NUM_COL_A; ++k, pb += NUM_COL_B) {
+        double av = pa[k];
+        tmp1 += av * pb[0];
+        tmp2 += av * pb[1];
+      }
+
+      const int index = (row + start_row_c) * col_stride_c + start_col_c + col;
+      CERES_GEMM_STORE_PAIR(C, index, tmp1, tmp2);
+    }
+
+    // Return early for efficiency when multiplying extremely small matrices.
+    if (NUM_COL_C < span) {
+      return;
+    }
+  }
+
+  // Calculate the main part with multiples of 4.
+  int col_m = NUM_COL_C & (int)(~(span - 1));
+  for (int col = 0; col < col_m; col += span) {
+    for (int row = 0; row < NUM_ROW_C; ++row) {
+      const int index = (row + start_row_c) * col_stride_c + start_col_c + col;
+      MMM_mat1x4(NUM_COL_A, &A[row * NUM_COL_A],
+                 &B[col], NUM_COL_B, &C[index], kOperation);
+    }
+  }
+
+}
+
+CERES_GEMM_BEGIN(MatrixMatrixMultiply) {
+#ifdef CERES_NO_CUSTOM_BLAS
+
+  CERES_CALL_GEMM(MatrixMatrixMultiplyEigen)
+  return;
+
+#else
+
+  if (kRowA != Eigen::Dynamic && kColA != Eigen::Dynamic &&
+      kRowB != Eigen::Dynamic && kColB != Eigen::Dynamic) {
+    CERES_CALL_GEMM(MatrixMatrixMultiplyEigen)
+  } else {
+    CERES_CALL_GEMM(MatrixMatrixMultiplyNaive)
+  }
+
+#endif
+}
+
+CERES_GEMM_BEGIN(MatrixTransposeMatrixMultiplyEigen) {
+  CERES_GEMM_EIGEN_HEADER
+  Eigen::Block<MatrixRef, kColA, kColB> block(Cref,
+                                              start_row_c, start_col_c,
+                                              num_col_a, num_col_b);
+  if (kOperation > 0) {
+    block.noalias() += Aref.transpose() * Bref;
+  } else if (kOperation < 0) {
+    block.noalias() -= Aref.transpose() * Bref;
+  } else {
+    block.noalias() = Aref.transpose() * Bref;
+  }
+}
+
+CERES_GEMM_BEGIN(MatrixTransposeMatrixMultiplyNaive) {
+  CERES_GEMM_NAIVE_HEADER
+  DCHECK_EQ(NUM_ROW_A, NUM_ROW_B);
+
+  const int NUM_ROW_C = NUM_COL_A;
+  const int NUM_COL_C = NUM_COL_B;
+  DCHECK_LE(start_row_c + NUM_ROW_C, row_stride_c);
+  DCHECK_LE(start_col_c + NUM_COL_C, col_stride_c);
+  const int span = 4;
+
+  // Process the remainder part first.
+
+  // Process the last odd column if present.
+  if (NUM_COL_C & 1) {
+    int col = NUM_COL_C - 1;
+    for (int row = 0; row < NUM_ROW_C; ++row) {
+      const double* pa = &A[row];
+      const double* pb = &B[col];
+      double tmp = 0.0;
+      for (int k = 0; k < NUM_ROW_A; ++k) {
+        tmp += pa[0] * pb[0];
+        pa += NUM_COL_A;
+        pb += NUM_COL_B;
+      }
+
+      const int index = (row + start_row_c) * col_stride_c + start_col_c + col;
+      CERES_GEMM_STORE_SINGLE(C, index, tmp);
+    }
+
+    // Return early; for such small matrices the product is already complete.
+    if (NUM_COL_C == 1) {
+      return;
+    }
+  }
+
+  // Process the remaining pair of columns, if present.
+  if (NUM_COL_C & 2) {
+    int col = NUM_COL_C & (int)(~(span - 1));
+    for (int row = 0; row < NUM_ROW_C; ++row) {
+      const double* pa = &A[row];
+      const double* pb = &B[col];
+      double tmp1 = 0.0, tmp2 = 0.0;
+      for (int k = 0; k < NUM_ROW_A; ++k) {
+        double av = *pa;
+        tmp1 += av * pb[0];
+        tmp2 += av * pb[1];
+        pa += NUM_COL_A;
+        pb += NUM_COL_B;
+      }
+
+      const int index = (row + start_row_c) * col_stride_c + start_col_c + col;
+      CERES_GEMM_STORE_PAIR(C, index, tmp1, tmp2);
+    }
+
+    // Return early; for such small matrices the product is already complete.
+    if (NUM_COL_C < span) {
+      return;
+    }
+  }
+
+  // Process the main part with multiples of 4.
+  int col_m = NUM_COL_C & (int)(~(span - 1));
+  for (int col = 0; col < col_m; col += span) {
+    for (int row = 0; row < NUM_ROW_C; ++row) {
+      const int index = (row + start_row_c) * col_stride_c + start_col_c + col;
+      MTM_mat1x4(NUM_ROW_A, &A[row], NUM_COL_A,
+                 &B[col], NUM_COL_B, &C[index], kOperation);
+    }
+  }
+
+}
+
+CERES_GEMM_BEGIN(MatrixTransposeMatrixMultiply) {
+#ifdef CERES_NO_CUSTOM_BLAS
+
+  CERES_CALL_GEMM(MatrixTransposeMatrixMultiplyEigen)
+  return;
+
+#else
+
+  if (kRowA != Eigen::Dynamic && kColA != Eigen::Dynamic &&
+      kRowB != Eigen::Dynamic && kColB != Eigen::Dynamic) {
+    CERES_CALL_GEMM(MatrixTransposeMatrixMultiplyEigen)
+  } else {
+    CERES_CALL_GEMM(MatrixTransposeMatrixMultiplyNaive)
+  }
+
+#endif
+}
+
+// Matrix-Vector multiplication
+//
+// c op A * b;
+//
+// where op can be +=, -=, or =.
+//
+// The template parameters (kRowA, kColA) allow specialization of the
+// loop at compile time. If this information is not available, then
+// Eigen::Dynamic should be used as the template argument.
+//
+// kOperation =  1  -> c += A * b
+// kOperation = -1  -> c -= A * b
+// kOperation =  0  -> c  = A * b
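+//
+// For example (a minimal illustrative sketch, not taken from the library),
+// for a fixed-size 4 x 3 row-major A:
+//
+//   double A[4 * 3] = {1.0};         // row-major; remaining entries are 0
+//   double b[3] = {1.0, 2.0, 3.0};
+//   double c[4] = {0.0};
+//   MatrixVectorMultiply<4, 3, 1>(A, 4, 3, b, c);   // c += A * b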
+template<int kRowA, int kColA, int kOperation>
+inline void MatrixVectorMultiply(const double* A,
+                                 const int num_row_a,
+                                 const int num_col_a,
+                                 const double* b,
+                                 double* c) {
+#ifdef CERES_NO_CUSTOM_BLAS
+  const typename EigenTypes<kRowA, kColA>::ConstMatrixRef
+      Aref(A, num_row_a, num_col_a);
+  const typename EigenTypes<kColA>::ConstVectorRef bref(b, num_col_a);
+  typename EigenTypes<kRowA>::VectorRef cref(c, num_row_a);
+
+  // lazyProduct works better than .noalias() for matrix-vector
+  // products.
+  if (kOperation > 0) {
+    cref += Aref.lazyProduct(bref);
+  } else if (kOperation < 0) {
+    cref -= Aref.lazyProduct(bref);
+  } else {
+    cref = Aref.lazyProduct(bref);
+  }
+#else
+
+  DCHECK_GT(num_row_a, 0);
+  DCHECK_GT(num_col_a, 0);
+  DCHECK((kRowA == Eigen::Dynamic) || (kRowA == num_row_a));
+  DCHECK((kColA == Eigen::Dynamic) || (kColA == num_col_a));
+
+  const int NUM_ROW_A = (kRowA != Eigen::Dynamic ? kRowA : num_row_a);
+  const int NUM_COL_A = (kColA != Eigen::Dynamic ? kColA : num_col_a);
+  const int span = 4;
+
+  // Calculate the remainder part first.
+
+  // Process the last odd row if present.
+  if (NUM_ROW_A & 1) {
+    int row  = NUM_ROW_A - 1;
+    const double* pa = &A[row * NUM_COL_A];
+    const double* pb = &b[0];
+    double tmp = 0.0;
+    for (int col = 0; col < NUM_COL_A; ++col) {
+      tmp += (*pa++) * (*pb++);
+    }
+    CERES_GEMM_STORE_SINGLE(c, row, tmp);
+
+    // Return early; for such small matrices the product is already complete.
+    if (NUM_ROW_A == 1) {
+      return;
+    }
+  }
+
+  // Process the remaining pair of rows, if present.
+  if (NUM_ROW_A & 2) {
+    int row = NUM_ROW_A & (int)(~(span - 1));
+    const double* pa1 = &A[row * NUM_COL_A];
+    const double* pa2 = pa1 + NUM_COL_A;
+    const double* pb = &b[0];
+    double tmp1 = 0.0, tmp2 = 0.0;
+    for (int col = 0; col < NUM_COL_A; ++col) {
+      double bv = *pb++;
+      tmp1 += *(pa1++) * bv;
+      tmp2 += *(pa2++) * bv;
+    }
+    CERES_GEMM_STORE_PAIR(c, row, tmp1, tmp2);
+
+    // Return early; for such small matrices the product is already complete.
+    if (NUM_ROW_A < span) {
+      return;
+    }
+  }
+
+  // Calculate the main part with multiples of 4.
+  int row_m = NUM_ROW_A & (int)(~(span - 1));
+  for (int row = 0; row < row_m; row += span) {
+    MVM_mat4x1(NUM_COL_A, &A[row * NUM_COL_A], NUM_COL_A,
+               &b[0], &c[row], kOperation);
+  }
+
+#endif  // CERES_NO_CUSTOM_BLAS
+}
+
+// Similar to MatrixVectorMultiply, except that A is transposed, i.e.,
+//
+// c op A' * b;
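+//
+// For example (illustrative sketch), with a 4 x 3 row-major A the vector b
+// now has 4 entries and c has 3:
+//
+//   MatrixTransposeVectorMultiply<4, 3, 1>(A, 4, 3, b, c);   // c += A' * b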
+template<int kRowA, int kColA, int kOperation>
+inline void MatrixTransposeVectorMultiply(const double* A,
+                                          const int num_row_a,
+                                          const int num_col_a,
+                                          const double* b,
+                                          double* c) {
+#ifdef CERES_NO_CUSTOM_BLAS
+  const typename EigenTypes<kRowA, kColA>::ConstMatrixRef
+      Aref(A, num_row_a, num_col_a);
+  const typename EigenTypes<kRowA>::ConstVectorRef bref(b, num_row_a);
+  typename EigenTypes<kColA>::VectorRef cref(c, num_col_a);
+
+  // lazyProduct works better than .noalias() for matrix-vector
+  // products.
+  if (kOperation > 0) {
+    cref += Aref.transpose().lazyProduct(bref);
+  } else if (kOperation < 0) {
+    cref -= Aref.transpose().lazyProduct(bref);
+  } else {
+    cref = Aref.transpose().lazyProduct(bref);
+  }
+#else
+
+  DCHECK_GT(num_row_a, 0);
+  DCHECK_GT(num_col_a, 0);
+  DCHECK((kRowA == Eigen::Dynamic) || (kRowA == num_row_a));
+  DCHECK((kColA == Eigen::Dynamic) || (kColA == num_col_a));
+
+  const int NUM_ROW_A = (kRowA != Eigen::Dynamic ? kRowA : num_row_a);
+  const int NUM_COL_A = (kColA != Eigen::Dynamic ? kColA : num_col_a);
+  const int span = 4;
+
+  // Calculate the remainder part first.
+
+  // Process the last odd column if present.
+  if (NUM_COL_A & 1) {
+    int row  = NUM_COL_A - 1;
+    const double* pa = &A[row];
+    const double* pb = &b[0];
+    double tmp = 0.0;
+    for (int col = 0; col < NUM_ROW_A; ++col) {
+      tmp += *pa * (*pb++);
+      pa += NUM_COL_A;
+    }
+    CERES_GEMM_STORE_SINGLE(c, row, tmp);
+
+    // Return early; for such small matrices the product is already complete.
+    if (NUM_COL_A == 1) {
+      return;
+    }
+  }
+
+  // Process the remaining pair of columns, if present.
+  if (NUM_COL_A & 2) {
+    int row = NUM_COL_A & (int)(~(span - 1));
+    const double* pa = &A[row];
+    const double* pb = &b[0];
+    double tmp1 = 0.0, tmp2 = 0.0;
+    for (int col = 0; col < NUM_ROW_A; ++col) {
+      double bv = *pb++;
+      tmp1 += *(pa    ) * bv;
+      tmp2 += *(pa + 1) * bv;
+      pa += NUM_COL_A;
+    }
+    CERES_GEMM_STORE_PAIR(c, row, tmp1, tmp2);
+
+    // Return early; for such small matrices the product is already complete.
+    if (NUM_COL_A < span) {
+      return;
+    }
+  }
+
+  // Calculate the main part with multiples of 4.
+  int row_m = NUM_COL_A & (int)(~(span - 1));
+  for (int row = 0; row < row_m; row += span) {
+    MTV_mat4x1(NUM_ROW_A, &A[row], NUM_COL_A,
+               &b[0], &c[row], kOperation);
+  }
+
+#endif  // CERES_NO_CUSTOM_BLAS
+}
+
+#undef CERES_GEMM_BEGIN
+#undef CERES_GEMM_EIGEN_HEADER
+#undef CERES_GEMM_NAIVE_HEADER
+#undef CERES_CALL_GEMM
+#undef CERES_GEMM_STORE_SINGLE
+#undef CERES_GEMM_STORE_PAIR
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_SMALL_BLAS_H_
diff --git a/internal/ceres/small_blas_gemm_benchmark.cc b/internal/ceres/small_blas_gemm_benchmark.cc
new file mode 100644
index 0000000..0a760a5
--- /dev/null
+++ b/internal/ceres/small_blas_gemm_benchmark.cc
@@ -0,0 +1,165 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <iostream>
+#include "Eigen/Dense"
+#include "benchmark/benchmark.h"
+#include "ceres/small_blas.h"
+
+namespace ceres {
+namespace internal {
+
+// Benchmarking matrix-matrix multiply routines and optimizing memory
+// access requires making sure that the operands are not just sitting
+// in the cache. So, as the benchmarking routine iterates, we need to
+// multiply new/different matrices. Allocating/creating these objects
+// inside the benchmarking loop is too expensive, so we create them
+// beforehand and cycle through them in the benchmark. This class,
+// given the sizes of the matrices, creates such objects for use in
+// the benchmark.
+class MatrixMatrixMultiplyData {
+ public:
+  MatrixMatrixMultiplyData(
+      int a_rows, int a_cols, int b_rows, int b_cols, int c_rows, int c_cols)
+      : num_elements_(1000),
+        a_size_(a_rows * a_cols),
+        b_size_(b_rows * b_cols),
+        c_size_(c_rows * c_cols),
+        a_(num_elements_ * a_size_, 1.00001),
+        b_(num_elements_ * b_size_, 0.5),
+        c_(num_elements_ * c_size_, -1.1) {}
+
+  int num_elements() const { return num_elements_; }
+  double* GetA(int i) { return &a_[i * a_size_]; }
+  double* GetB(int i) { return &b_[i * b_size_]; }
+  double* GetC(int i) { return &c_[i * c_size_]; }
+
+ private:
+  int num_elements_;
+  int a_size_;
+  int b_size_;
+  int c_size_;
+  std::vector<double> a_;
+  std::vector<double> b_;
+  std::vector<double> c_;
+};
+
+static void MatrixMatrixMultiplySizeArguments(
+    benchmark::internal::Benchmark* benchmark) {
+  const std::vector<int> b_rows = {1, 2, 3, 4, 6, 8};
+  const std::vector<int> b_cols = {1, 2, 3, 4, 8, 12, 15};
+  const std::vector<int> c_cols = b_cols;
+  for (int i : b_rows) {
+    for (int j : b_cols) {
+      for (int k : c_cols) {
+        benchmark->Args({i, j, k});
+      }
+    }
+  }
+}
+
+void BM_MatrixMatrixMultiplyDynamic(benchmark::State& state) {
+  const int i = state.range(0);
+  const int j = state.range(1);
+  const int k = state.range(2);
+
+  const int b_rows = i;
+  const int b_cols = j;
+  const int c_rows = b_cols;
+  const int c_cols = k;
+  const int a_rows = b_rows;
+  const int a_cols = c_cols;
+
+  MatrixMatrixMultiplyData data(a_rows, a_cols, b_rows, b_cols, c_rows, c_cols);
+  const int num_elements = data.num_elements();
+
+  int iter = 0;
+  for (auto _ : state) {
+    // a += b * c
+    MatrixMatrixMultiply
+        <Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>
+        (data.GetB(iter), b_rows, b_cols,
+         data.GetC(iter), c_rows, c_cols,
+         data.GetA(iter), 0, 0, a_rows, a_cols);
+    iter = (iter + 1) % num_elements;
+  }
+}
+
+BENCHMARK(BM_MatrixMatrixMultiplyDynamic)
+    ->Apply(MatrixMatrixMultiplySizeArguments);
+
+static void MatrixTransposeMatrixMultiplySizeArguments(
+    benchmark::internal::Benchmark* benchmark) {
+  std::vector<int> b_rows = {1, 2, 3, 4, 6, 8};
+  std::vector<int> b_cols = {1, 2, 3, 4, 8, 12, 15};
+  std::vector<int> c_cols = b_rows;
+  for (int i : b_rows) {
+    for (int j : b_cols) {
+      for (int k : c_cols) {
+        benchmark->Args({i, j, k});
+      }
+    }
+  }
+}
+
+void BM_MatrixTransposeMatrixMultiplyDynamic(benchmark::State& state) {
+  const int i = state.range(0);
+  const int j = state.range(1);
+  const int k = state.range(2);
+
+  const int b_rows = i;
+  const int b_cols = j;
+  const int c_rows = b_rows;
+  const int c_cols = k;
+  const int a_rows = b_cols;
+  const int a_cols = c_cols;
+
+  MatrixMatrixMultiplyData data(a_rows, a_cols, b_rows, b_cols, c_rows, c_cols);
+  const int num_elements = data.num_elements();
+
+  int iter = 0;
+  for (auto _ : state) {
+    // a += b' * c
+    MatrixTransposeMatrixMultiply
+        <Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>
+        (data.GetB(iter), b_rows, b_cols,
+         data.GetC(iter), c_rows, c_cols,
+         data.GetA(iter), 0, 0, a_rows, a_cols);
+    iter = (iter + 1) % num_elements;
+  }
+}
+
+BENCHMARK(BM_MatrixTransposeMatrixMultiplyDynamic)
+    ->Apply(MatrixTransposeMatrixMultiplySizeArguments);
+
+}  // namespace internal
+}  // namespace ceres
+
+BENCHMARK_MAIN();
diff --git a/internal/ceres/small_blas_gemv_benchmark.cc b/internal/ceres/small_blas_gemv_benchmark.cc
new file mode 100644
index 0000000..4b587bf
--- /dev/null
+++ b/internal/ceres/small_blas_gemv_benchmark.cc
@@ -0,0 +1,114 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "Eigen/Dense"
+#include "benchmark/benchmark.h"
+#include "ceres/small_blas.h"
+
+namespace ceres {
+
+// Benchmarking matrix-vector multiply routines and optimizing memory
+// access requires making sure that the operands are not just sitting
+// in the cache. So, as the benchmarking routine iterates, we need to
+// multiply new/different matrices and vectors. Allocating/creating
+// these objects inside the benchmarking loop is too expensive, so we
+// create them beforehand and cycle through them in the benchmark.
+// This class, given the size of the matrix, creates such matrix and
+// vector objects for use in the benchmark.
+class MatrixVectorMultiplyData {
+ public:
+  MatrixVectorMultiplyData(int rows, int cols)
+      : num_elements_(1000),
+        rows_(rows),
+        cols_(cols),
+        a_(num_elements_ * rows, 1.001),
+        b_(num_elements_ * rows * cols, 1.5),
+        c_(num_elements_ * cols, 1.00003) {}
+
+  int num_elements() const { return num_elements_; }
+  double* GetA(int i) { return &a_[i * rows_]; }
+  double* GetB(int i) { return &b_[i * rows_ * cols_]; }
+  double* GetC(int i) { return &c_[i * cols_]; }
+
+ private:
+  const int num_elements_;
+  const int rows_;
+  const int cols_;
+  std::vector<double> a_;
+  std::vector<double> b_;
+  std::vector<double> c_;
+};
+
+// Helper function to generate the various matrix sizes for which we
+// run the benchmark.
+static void MatrixSizeArguments(benchmark::internal::Benchmark* benchmark) {
+  std::vector<int> rows = {1, 2, 3, 4, 6, 8};
+  std::vector<int> cols = {1, 2, 3, 4, 8, 12, 15};
+  for (int r : rows) {
+    for (int c : cols) {
+      benchmark->Args({r, c});
+    }
+  }
+}
+
+void BM_MatrixVectorMultiply(benchmark::State& state) {
+  const int rows = state.range(0);
+  const int cols = state.range(1);
+  MatrixVectorMultiplyData data(rows, cols);
+  const int num_elements = data.num_elements();
+  int iter = 0;
+  for (auto _ : state) {
+    // A += B * C;
+    internal::MatrixVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
+        data.GetB(iter), rows, cols, data.GetC(iter), data.GetA(iter));
+    iter = (iter + 1) % num_elements;
+  }
+}
+
+BENCHMARK(BM_MatrixVectorMultiply)->Apply(MatrixSizeArguments);
+
+void BM_MatrixTransposeVectorMultiply(benchmark::State& state) {
+  const int rows = state.range(0);
+  const int cols = state.range(1);
+  MatrixVectorMultiplyData data(cols, rows);
+  const int num_elements = data.num_elements();
+  int iter = 0;
+  for (auto _ : state) {
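+    // A += B' * C;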
+    internal::MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
+        data.GetB(iter), rows, cols, data.GetC(iter), data.GetA(iter));
+    iter = (iter + 1) % num_elements;
+  }
+}
+
+BENCHMARK(BM_MatrixTransposeVectorMultiply)->Apply(MatrixSizeArguments);
+
+}  // namespace ceres
+
+BENCHMARK_MAIN();
diff --git a/internal/ceres/small_blas_generic.h b/internal/ceres/small_blas_generic.h
new file mode 100644
index 0000000..978c5d5
--- /dev/null
+++ b/internal/ceres/small_blas_generic.h
@@ -0,0 +1,315 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: yangfan34@lenovo.com (Lenovo Research Device+ Lab - Shanghai)
+//
+// Optimization for simple blas functions used in the Schur Eliminator.
+// These are fairly basic implementations which already yield a significant
+// speedup in the eliminator performance.
+
+#ifndef CERES_INTERNAL_SMALL_BLAS_GENERIC_H_
+#define CERES_INTERNAL_SMALL_BLAS_GENERIC_H_
+
+namespace ceres {
+namespace internal {
+
+// The following macros are used to share code between the functions below.
+#define CERES_GEMM_OPT_NAIVE_HEADER              \
+  double c0 = 0.0;                               \
+  double c1 = 0.0;                               \
+  double c2 = 0.0;                               \
+  double c3 = 0.0;                               \
+  const double* pa = a;                          \
+  const double* pb = b;                          \
+  const int span = 4;                            \
+  int col_r = col_a & (span - 1);                \
+  int col_m = col_a - col_r;
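+
+// Note: CERES_GEMM_OPT_NAIVE_HEADER splits col_a into a main part col_m
+// (the largest multiple of span = 4 not exceeding col_a) and a remainder
+// col_r of at most 3 entries, e.g. col_a = 10 gives col_m = 8, col_r = 2.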
+
+#define CERES_GEMM_OPT_STORE_MAT1X4              \
+  if (kOperation > 0) {                          \
+    *c++ += c0;                                  \
+    *c++ += c1;                                  \
+    *c++ += c2;                                  \
+    *c++ += c3;                                  \
+  } else if (kOperation < 0) {                   \
+    *c++ -= c0;                                  \
+    *c++ -= c1;                                  \
+    *c++ -= c2;                                  \
+    *c++ -= c3;                                  \
+  } else {                                       \
+    *c++ = c0;                                   \
+    *c++ = c1;                                   \
+    *c++ = c2;                                   \
+    *c++ = c3;                                   \
+  }
+
+// Matrix-Matrix Multiplication
+// Figure out 1x4 of Matrix C in one batch
+//
+// c op a * B;
+// where op can be +=, -=, or =, indicated by kOperation.
+//
+//  Matrix C              Matrix A                   Matrix B
+//
+//  C0, C1, C2, C3   op   A0, A1, A2, A3, ...    *   B0, B1, B2, B3
+//                                                   B4, B5, B6, B7
+//                                                   B8, B9, Ba, Bb
+//                                                   Bc, Bd, Be, Bf
+//                                                   . , . , . , .
+//                                                   . , . , . , .
+//                                                   . , . , . , .
+//
+// The loops are unrolled to make use of data already resident in the cache.
+// NOTE: col_a is the number of columns of A.
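+//
+// For example, the naive matrix-matrix multiply in small_blas.h computes
+// each 1 x 4 block of C with a call of the form
+//
+//   MMM_mat1x4(NUM_COL_A, &A[row * NUM_COL_A],
+//              &B[col], NUM_COL_B, &C[index], kOperation);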
+static inline void MMM_mat1x4(const int col_a,
+                              const double* a,
+                              const double* b,
+                              const int col_stride_b,
+                              double* c,
+                              const int kOperation) {
+  CERES_GEMM_OPT_NAIVE_HEADER
+  double av = 0.0;
+  int bi = 0;
+
+#define CERES_GEMM_OPT_MMM_MAT1X4_MUL  \
+  av = pa[k];                          \
+  pb = b + bi;                         \
+  c0 += av * *pb++;                    \
+  c1 += av * *pb++;                    \
+  c2 += av * *pb++;                    \
+  c3 += av * *pb++;                    \
+  bi += col_stride_b;                  \
+  k++;
+
+  for (int k = 0; k < col_m;) {
+    CERES_GEMM_OPT_MMM_MAT1X4_MUL
+    CERES_GEMM_OPT_MMM_MAT1X4_MUL
+    CERES_GEMM_OPT_MMM_MAT1X4_MUL
+    CERES_GEMM_OPT_MMM_MAT1X4_MUL
+  }
+
+  for (int k = col_m; k < col_a;) {
+    CERES_GEMM_OPT_MMM_MAT1X4_MUL
+  }
+
+  CERES_GEMM_OPT_STORE_MAT1X4
+
+#undef CERES_GEMM_OPT_MMM_MAT1X4_MUL
+}
+
+// Matrix Transpose-Matrix multiplication
+// Figure out 1x4 of Matrix C in one batch
+//
+// c op a' * B;
+// where op can be +=, -=, or =, indicated by kOperation.
+//
+//                        Matrix A
+//
+//                        A0
+//                        A1
+//                        A2
+//                        A3
+//                        .
+//                        .
+//                        .
+//
+//  Matrix C              Matrix A'                  Matrix B
+//
+//  C0, C1, C2, C3   op   A0, A1, A2, A3, ...    *   B0, B1, B2, B3
+//                                                   B4, B5, B6, B7
+//                                                   B8, B9, Ba, Bb
+//                                                   Bc, Bd, Be, Bf
+//                                                   . , . , . , .
+//                                                   . , . , . , .
+//                                                   . , . , . , .
+//
+// The loops are unrolled to make use of data already resident in the cache.
+// NOTE: col_a is the number of columns of A'.
+static inline void MTM_mat1x4(const int col_a,
+                              const double* a,
+                              const int col_stride_a,
+                              const double* b,
+                              const int col_stride_b,
+                              double* c,
+                              const int kOperation) {
+  CERES_GEMM_OPT_NAIVE_HEADER
+  double av = 0.0;
+  int ai = 0;
+  int bi = 0;
+
+#define CERES_GEMM_OPT_MTM_MAT1X4_MUL  \
+  av = pa[ai];                         \
+  pb = b + bi;                         \
+  c0 += av * *pb++;                    \
+  c1 += av * *pb++;                    \
+  c2 += av * *pb++;                    \
+  c3 += av * *pb++;                    \
+  ai += col_stride_a;                  \
+  bi += col_stride_b;
+
+  for (int k = 0; k < col_m; k += span) {
+    CERES_GEMM_OPT_MTM_MAT1X4_MUL
+    CERES_GEMM_OPT_MTM_MAT1X4_MUL
+    CERES_GEMM_OPT_MTM_MAT1X4_MUL
+    CERES_GEMM_OPT_MTM_MAT1X4_MUL
+  }
+
+  for (int k = col_m; k < col_a; k++) {
+    CERES_GEMM_OPT_MTM_MAT1X4_MUL
+  }
+
+  CERES_GEMM_OPT_STORE_MAT1X4
+
+#undef CERES_GEMM_OPT_MTM_MAT1X4_MUL
+}
+
+// Matrix-Vector Multiplication
+// Figure out 4x1 of vector c in one batch
+//
+// c op A * b;
+// where op can be +=, -=, or =, indicated by kOperation.
+//
+//  Vector c              Matrix A                   Vector b
+//
+//  C0               op   A0, A1, A2, A3, ...    *   B0
+//  C1                    A4, A5, A6, A7, ...        B1
+//  C2                    A8, A9, Aa, Ab, ...        B2
+//  C3                    Ac, Ad, Ae, Af, ...        B3
+//                                                   .
+//                                                   .
+//                                                   .
+//
+// The loops are unrolled to make use of data already resident in the cache.
+// NOTE: col_a is the number of columns of A.
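+//
+// For example, MatrixVectorMultiply() in small_blas.h computes each group
+// of four entries of c with a call of the form
+//
+//   MVM_mat4x1(NUM_COL_A, &A[row * NUM_COL_A], NUM_COL_A,
+//              &b[0], &c[row], kOperation);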
+static inline void MVM_mat4x1(const int col_a,
+                              const double* a,
+                              const int col_stride_a,
+                              const double* b,
+                              double* c,
+                              const int kOperation) {
+  CERES_GEMM_OPT_NAIVE_HEADER
+  double bv = 0.0;
+
+#define CERES_GEMM_OPT_MVM_MAT4X1_MUL              \
+  bv = *pb;                                        \
+  c0 += *(pa                   ) * bv;             \
+  c1 += *(pa + col_stride_a    ) * bv;             \
+  c2 += *(pa + col_stride_a * 2) * bv;             \
+  c3 += *(pa + col_stride_a * 3) * bv;             \
+  pa++;                                            \
+  pb++;
+
+  for (int k = 0; k < col_m; k += span) {
+    CERES_GEMM_OPT_MVM_MAT4X1_MUL
+    CERES_GEMM_OPT_MVM_MAT4X1_MUL
+    CERES_GEMM_OPT_MVM_MAT4X1_MUL
+    CERES_GEMM_OPT_MVM_MAT4X1_MUL
+  }
+
+  for (int k = col_m; k < col_a; k++) {
+    CERES_GEMM_OPT_MVM_MAT4X1_MUL
+  }
+
+  CERES_GEMM_OPT_STORE_MAT1X4
+
+#undef CERES_GEMM_OPT_MVM_MAT4X1_MUL
+}
+
+// Matrix Transpose-Vector multiplication
+// Figure out 4x1 of vector c in one batch
+//
+// c op A' * b;
+// where op can be +=, -=, or =, indicated by kOperation.
+//
+//                        Matrix A
+//
+//                        A0, A4, A8, Ac
+//                        A1, A5, A9, Ad
+//                        A2, A6, Aa, Ae
+//                        A3, A7, Ab, Af
+//                        . , . , . , .
+//                        . , . , . , .
+//                        . , . , . , .
+//
+//  Vector c              Matrix A'                  Vector b
+//
+//  C0               op   A0, A1, A2, A3, ...    *   B0
+//  C1                    A4, A5, A6, A7, ...        B1
+//  C2                    A8, A9, Aa, Ab, ...        B2
+//  C3                    Ac, Ad, Ae, Af, ...        B3
+//                                                   .
+//                                                   .
+//                                                   .
+//
+// The loops are unrolled to make use of data already resident in the cache.
+// NOTE: col_a is the number of columns of A'.
+static inline void MTV_mat4x1(const int col_a,
+                              const double* a,
+                              const int col_stride_a,
+                              const double* b,
+                              double* c,
+                              const int kOperation) {
+  CERES_GEMM_OPT_NAIVE_HEADER
+  double bv = 0.0;
+
+#define CERES_GEMM_OPT_MTV_MAT4X1_MUL  \
+  bv = *pb;                            \
+  c0 += *(pa    ) * bv;                \
+  c1 += *(pa + 1) * bv;                \
+  c2 += *(pa + 2) * bv;                \
+  c3 += *(pa + 3) * bv;                \
+  pa += col_stride_a;                  \
+  pb++;
+
+  for (int k = 0; k < col_m; k += span) {
+    CERES_GEMM_OPT_MTV_MAT4X1_MUL
+    CERES_GEMM_OPT_MTV_MAT4X1_MUL
+    CERES_GEMM_OPT_MTV_MAT4X1_MUL
+    CERES_GEMM_OPT_MTV_MAT4X1_MUL
+  }
+
+  for (int k = col_m; k < col_a; k++) {
+    CERES_GEMM_OPT_MTV_MAT4X1_MUL
+  }
+
+  CERES_GEMM_OPT_STORE_MAT1X4
+
+#undef CERES_GEMM_OPT_MTV_MAT4X1_MUL
+}
+
+#undef CERES_GEMM_OPT_NAIVE_HEADER
+#undef CERES_GEMM_OPT_STORE_MAT1X4
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_SMALL_BLAS_GENERIC_H_
diff --git a/internal/ceres/small_blas_test.cc b/internal/ceres/small_blas_test.cc
new file mode 100644
index 0000000..2914244
--- /dev/null
+++ b/internal/ceres/small_blas_test.cc
@@ -0,0 +1,478 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include "ceres/small_blas.h"
+
+#include <limits>
+#include "gtest/gtest.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+const double kTolerance = 3.0 * std::numeric_limits<double>::epsilon();
+
+TEST(BLAS, MatrixMatrixMultiply) {
+  const int kRowA = 3;
+  const int kColA = 5;
+  Matrix A(kRowA, kColA);
+  A.setOnes();
+
+  const int kRowB = 5;
+  const int kColB = 7;
+  Matrix B(kRowB, kColB);
+  B.setOnes();
+
+  for (int row_stride_c = kRowA; row_stride_c < 3 * kRowA; ++row_stride_c) {
+    for (int col_stride_c = kColB; col_stride_c < 3 * kColB; ++col_stride_c) {
+      Matrix C(row_stride_c, col_stride_c);
+      C.setOnes();
+
+      Matrix C_plus = C;
+      Matrix C_minus = C;
+      Matrix C_assign = C;
+
+      Matrix C_plus_ref = C;
+      Matrix C_minus_ref = C;
+      Matrix C_assign_ref = C;
+      for (int start_row_c = 0; start_row_c + kRowA < row_stride_c; ++start_row_c) {
+        for (int start_col_c = 0; start_col_c + kColB < col_stride_c; ++start_col_c) {
+          C_plus_ref.block(start_row_c, start_col_c, kRowA, kColB) +=
+              A * B;
+
+          MatrixMatrixMultiply<kRowA, kColA, kRowB, kColB, 1>(
+              A.data(), kRowA, kColA,
+              B.data(), kRowB, kColB,
+              C_plus.data(), start_row_c, start_col_c, row_stride_c, col_stride_c);
+
+          EXPECT_NEAR((C_plus_ref - C_plus).norm(), 0.0, kTolerance)
+              << "C += A * B \n"
+              << "row_stride_c : " << row_stride_c << "\n"
+              << "col_stride_c : " << col_stride_c << "\n"
+              << "start_row_c  : " << start_row_c << "\n"
+              << "start_col_c  : " << start_col_c << "\n"
+              << "Cref : \n" << C_plus_ref << "\n"
+              << "C: \n" << C_plus;
+
+
+          C_minus_ref.block(start_row_c, start_col_c, kRowA, kColB) -=
+              A * B;
+
+          MatrixMatrixMultiply<kRowA, kColA, kRowB, kColB, -1>(
+              A.data(), kRowA, kColA,
+              B.data(), kRowB, kColB,
+              C_minus.data(), start_row_c, start_col_c, row_stride_c, col_stride_c);
+
+          EXPECT_NEAR((C_minus_ref - C_minus).norm(), 0.0, kTolerance)
+              << "C -= A * B \n"
+              << "row_stride_c : " << row_stride_c << "\n"
+              << "col_stride_c : " << col_stride_c << "\n"
+              << "start_row_c  : " << start_row_c << "\n"
+              << "start_col_c  : " << start_col_c << "\n"
+              << "Cref : \n" << C_minus_ref << "\n"
+              << "C: \n" << C_minus;
+
+          C_assign_ref.block(start_row_c, start_col_c, kRowA, kColB) =
+              A * B;
+
+          MatrixMatrixMultiply<kRowA, kColA, kRowB, kColB, 0>(
+              A.data(), kRowA, kColA,
+              B.data(), kRowB, kColB,
+              C_assign.data(), start_row_c, start_col_c, row_stride_c, col_stride_c);
+
+          EXPECT_NEAR((C_assign_ref - C_assign).norm(), 0.0, kTolerance)
+              << "C = A * B \n"
+              << "row_stride_c : " << row_stride_c << "\n"
+              << "col_stride_c : " << col_stride_c << "\n"
+              << "start_row_c  : " << start_row_c << "\n"
+              << "start_col_c  : " << start_col_c << "\n"
+              << "Cref : \n" << C_assign_ref << "\n"
+              << "C: \n" << C_assign;
+        }
+      }
+    }
+  }
+}
+
+TEST(BLAS, MatrixTransposeMatrixMultiply) {
+  const int kRowA = 5;
+  const int kColA = 3;
+  Matrix A(kRowA, kColA);
+  A.setOnes();
+
+  const int kRowB = 5;
+  const int kColB = 7;
+  Matrix B(kRowB, kColB);
+  B.setOnes();
+
+  for (int row_stride_c = kColA; row_stride_c < 3 * kColA; ++row_stride_c) {
+    for (int col_stride_c = kColB; col_stride_c <  3 * kColB; ++col_stride_c) {
+      Matrix C(row_stride_c, col_stride_c);
+      C.setOnes();
+
+      Matrix C_plus = C;
+      Matrix C_minus = C;
+      Matrix C_assign = C;
+
+      Matrix C_plus_ref = C;
+      Matrix C_minus_ref = C;
+      Matrix C_assign_ref = C;
+      for (int start_row_c = 0; start_row_c + kColA < row_stride_c; ++start_row_c) {
+        for (int start_col_c = 0; start_col_c + kColB < col_stride_c; ++start_col_c) {
+          C_plus_ref.block(start_row_c, start_col_c, kColA, kColB) +=
+              A.transpose() * B;
+
+          MatrixTransposeMatrixMultiply<kRowA, kColA, kRowB, kColB, 1>(
+              A.data(), kRowA, kColA,
+              B.data(), kRowB, kColB,
+              C_plus.data(), start_row_c, start_col_c, row_stride_c, col_stride_c);
+
+          EXPECT_NEAR((C_plus_ref - C_plus).norm(), 0.0, kTolerance)
+              << "C += A' * B \n"
+              << "row_stride_c : " << row_stride_c << "\n"
+              << "col_stride_c : " << col_stride_c << "\n"
+              << "start_row_c  : " << start_row_c << "\n"
+              << "start_col_c  : " << start_col_c << "\n"
+              << "Cref : \n" << C_plus_ref << "\n"
+              << "C: \n" << C_plus;
+
+          C_minus_ref.block(start_row_c, start_col_c, kColA, kColB) -=
+              A.transpose() * B;
+
+          MatrixTransposeMatrixMultiply<kRowA, kColA, kRowB, kColB, -1>(
+              A.data(), kRowA, kColA,
+              B.data(), kRowB, kColB,
+              C_minus.data(), start_row_c, start_col_c, row_stride_c, col_stride_c);
+
+          EXPECT_NEAR((C_minus_ref - C_minus).norm(), 0.0, kTolerance)
+              << "C -= A' * B \n"
+              << "row_stride_c : " << row_stride_c << "\n"
+              << "col_stride_c : " << col_stride_c << "\n"
+              << "start_row_c  : " << start_row_c << "\n"
+              << "start_col_c  : " << start_col_c << "\n"
+              << "Cref : \n" << C_minus_ref << "\n"
+              << "C: \n" << C_minus;
+
+          C_assign_ref.block(start_row_c, start_col_c, kColA, kColB) =
+              A.transpose() * B;
+
+          MatrixTransposeMatrixMultiply<kRowA, kColA, kRowB, kColB, 0>(
+              A.data(), kRowA, kColA,
+              B.data(), kRowB, kColB,
+              C_assign.data(), start_row_c, start_col_c, row_stride_c, col_stride_c);
+
+          EXPECT_NEAR((C_assign_ref - C_assign).norm(), 0.0, kTolerance)
+              << "C = A' * B \n"
+              << "row_stride_c : " << row_stride_c << "\n"
+              << "col_stride_c : " << col_stride_c << "\n"
+              << "start_row_c  : " << start_row_c << "\n"
+              << "start_col_c  : " << start_col_c << "\n"
+              << "Cref : \n" << C_assign_ref << "\n"
+              << "C: \n" << C_assign;
+        }
+      }
+    }
+  }
+}
+
+// TODO(sameeragarwal): Deduplicate the test code in this file.
+
+TEST(BLAS, MatrixMatrixMultiplyNaive) {
+  const int kRowA = 3;
+  const int kColA = 5;
+  Matrix A(kRowA, kColA);
+  A.setOnes();
+
+  const int kRowB = 5;
+  const int kColB = 7;
+  Matrix B(kRowB, kColB);
+  B.setOnes();
+
+  for (int row_stride_c = kRowA; row_stride_c < 3 * kRowA; ++row_stride_c) {
+    for (int col_stride_c = kColB; col_stride_c < 3 * kColB; ++col_stride_c) {
+      Matrix C(row_stride_c, col_stride_c);
+      C.setOnes();
+
+      Matrix C_plus = C;
+      Matrix C_minus = C;
+      Matrix C_assign = C;
+
+      Matrix C_plus_ref = C;
+      Matrix C_minus_ref = C;
+      Matrix C_assign_ref = C;
+      for (int start_row_c = 0; start_row_c + kRowA < row_stride_c; ++start_row_c) {
+        for (int start_col_c = 0; start_col_c + kColB < col_stride_c; ++start_col_c) {
+          C_plus_ref.block(start_row_c, start_col_c, kRowA, kColB) +=
+              A * B;
+
+          MatrixMatrixMultiplyNaive<kRowA, kColA, kRowB, kColB, 1>(
+              A.data(), kRowA, kColA,
+              B.data(), kRowB, kColB,
+              C_plus.data(), start_row_c, start_col_c, row_stride_c, col_stride_c);
+
+          EXPECT_NEAR((C_plus_ref - C_plus).norm(), 0.0, kTolerance)
+              << "C += A * B \n"
+              << "row_stride_c : " << row_stride_c << "\n"
+              << "col_stride_c : " << col_stride_c << "\n"
+              << "start_row_c  : " << start_row_c << "\n"
+              << "start_col_c  : " << start_col_c << "\n"
+              << "Cref : \n" << C_plus_ref << "\n"
+              << "C: \n" << C_plus;
+
+
+          C_minus_ref.block(start_row_c, start_col_c, kRowA, kColB) -=
+              A * B;
+
+          MatrixMatrixMultiplyNaive<kRowA, kColA, kRowB, kColB, -1>(
+              A.data(), kRowA, kColA,
+              B.data(), kRowB, kColB,
+              C_minus.data(), start_row_c, start_col_c, row_stride_c, col_stride_c);
+
+          EXPECT_NEAR((C_minus_ref - C_minus).norm(), 0.0, kTolerance)
+              << "C -= A * B \n"
+              << "row_stride_c : " << row_stride_c << "\n"
+              << "col_stride_c : " << col_stride_c << "\n"
+              << "start_row_c  : " << start_row_c << "\n"
+              << "start_col_c  : " << start_col_c << "\n"
+              << "Cref : \n" << C_minus_ref << "\n"
+              << "C: \n" << C_minus;
+
+          C_assign_ref.block(start_row_c, start_col_c, kRowA, kColB) =
+              A * B;
+
+          MatrixMatrixMultiplyNaive<kRowA, kColA, kRowB, kColB, 0>(
+              A.data(), kRowA, kColA,
+              B.data(), kRowB, kColB,
+              C_assign.data(), start_row_c, start_col_c, row_stride_c, col_stride_c);
+
+          EXPECT_NEAR((C_assign_ref - C_assign).norm(), 0.0, kTolerance)
+              << "C = A * B \n"
+              << "row_stride_c : " << row_stride_c << "\n"
+              << "col_stride_c : " << col_stride_c << "\n"
+              << "start_row_c  : " << start_row_c << "\n"
+              << "start_col_c  : " << start_col_c << "\n"
+              << "Cref : \n" << C_assign_ref << "\n"
+              << "C: \n" << C_assign;
+        }
+      }
+    }
+  }
+}
+
+TEST(BLAS, MatrixTransposeMatrixMultiplyNaive) {
+  const int kRowA = 5;
+  const int kColA = 3;
+  Matrix A(kRowA, kColA);
+  A.setOnes();
+
+  const int kRowB = 5;
+  const int kColB = 7;
+  Matrix B(kRowB, kColB);
+  B.setOnes();
+
+  for (int row_stride_c = kColA; row_stride_c < 3 * kColA; ++row_stride_c) {
+    for (int col_stride_c = kColB; col_stride_c <  3 * kColB; ++col_stride_c) {
+      Matrix C(row_stride_c, col_stride_c);
+      C.setOnes();
+
+      Matrix C_plus = C;
+      Matrix C_minus = C;
+      Matrix C_assign = C;
+
+      Matrix C_plus_ref = C;
+      Matrix C_minus_ref = C;
+      Matrix C_assign_ref = C;
+      for (int start_row_c = 0; start_row_c + kColA < row_stride_c; ++start_row_c) {
+        for (int start_col_c = 0; start_col_c + kColB < col_stride_c; ++start_col_c) {
+          C_plus_ref.block(start_row_c, start_col_c, kColA, kColB) +=
+              A.transpose() * B;
+
+          MatrixTransposeMatrixMultiplyNaive<kRowA, kColA, kRowB, kColB, 1>(
+              A.data(), kRowA, kColA,
+              B.data(), kRowB, kColB,
+              C_plus.data(), start_row_c, start_col_c, row_stride_c, col_stride_c);
+
+          EXPECT_NEAR((C_plus_ref - C_plus).norm(), 0.0, kTolerance)
+              << "C += A' * B \n"
+              << "row_stride_c : " << row_stride_c << "\n"
+              << "col_stride_c : " << col_stride_c << "\n"
+              << "start_row_c  : " << start_row_c << "\n"
+              << "start_col_c  : " << start_col_c << "\n"
+              << "Cref : \n" << C_plus_ref << "\n"
+              << "C: \n" << C_plus;
+
+          C_minus_ref.block(start_row_c, start_col_c, kColA, kColB) -=
+              A.transpose() * B;
+
+          MatrixTransposeMatrixMultiplyNaive<kRowA, kColA, kRowB, kColB, -1>(
+              A.data(), kRowA, kColA,
+              B.data(), kRowB, kColB,
+              C_minus.data(), start_row_c, start_col_c, row_stride_c, col_stride_c);
+
+          EXPECT_NEAR((C_minus_ref - C_minus).norm(), 0.0, kTolerance)
+              << "C -= A' * B \n"
+              << "row_stride_c : " << row_stride_c << "\n"
+              << "col_stride_c : " << col_stride_c << "\n"
+              << "start_row_c  : " << start_row_c << "\n"
+              << "start_col_c  : " << start_col_c << "\n"
+              << "Cref : \n" << C_minus_ref << "\n"
+              << "C: \n" << C_minus;
+
+          C_assign_ref.block(start_row_c, start_col_c, kColA, kColB) =
+              A.transpose() * B;
+
+          MatrixTransposeMatrixMultiplyNaive<kRowA, kColA, kRowB, kColB, 0>(
+              A.data(), kRowA, kColA,
+              B.data(), kRowB, kColB,
+              C_assign.data(), start_row_c, start_col_c, row_stride_c, col_stride_c);
+
+          EXPECT_NEAR((C_assign_ref - C_assign).norm(), 0.0, kTolerance)
+              << "C = A' * B \n"
+              << "row_stride_c : " << row_stride_c << "\n"
+              << "col_stride_c : " << col_stride_c << "\n"
+              << "start_row_c  : " << start_row_c << "\n"
+              << "start_col_c  : " << start_col_c << "\n"
+              << "Cref : \n" << C_assign_ref << "\n"
+              << "C: \n" << C_assign;
+        }
+      }
+    }
+  }
+}
+
+TEST(BLAS, MatrixVectorMultiply) {
+  for (int num_rows_a = 1; num_rows_a < 10; ++num_rows_a) {
+    for (int num_cols_a = 1; num_cols_a < 10; ++num_cols_a) {
+      Matrix A(num_rows_a, num_cols_a);
+      A.setOnes();
+
+      Vector b(num_cols_a);
+      b.setOnes();
+
+      Vector c(num_rows_a);
+      c.setOnes();
+
+      Vector c_plus = c;
+      Vector c_minus = c;
+      Vector c_assign = c;
+
+      Vector c_plus_ref = c;
+      Vector c_minus_ref = c;
+      Vector c_assign_ref = c;
+
+      c_plus_ref += A * b;
+      MatrixVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
+          A.data(), num_rows_a, num_cols_a,
+          b.data(),
+          c_plus.data());
+      EXPECT_NEAR((c_plus_ref - c_plus).norm(), 0.0, kTolerance)
+          << "c += A * b \n"
+          << "c_ref : \n" << c_plus_ref << "\n"
+          << "c: \n" << c_plus;
+
+      c_minus_ref -= A * b;
+      MatrixVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, -1>(
+          A.data(), num_rows_a, num_cols_a,
+          b.data(),
+          c_minus.data());
+      EXPECT_NEAR((c_minus_ref - c_minus).norm(), 0.0, kTolerance)
+          << "c += A * b \n"
+          << "c_ref : \n" << c_minus_ref << "\n"
+          << "c: \n" << c_minus;
+
+      c_assign_ref = A * b;
+      MatrixVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 0>(
+          A.data(), num_rows_a, num_cols_a,
+          b.data(),
+          c_assign.data());
+      EXPECT_NEAR((c_assign_ref - c_assign).norm(), 0.0, kTolerance)
+          << "c += A * b \n"
+          << "c_ref : \n" << c_assign_ref << "\n"
+          << "c: \n" << c_assign;
+    }
+  }
+}
+
+TEST(BLAS, MatrixTransposeVectorMultiply) {
+  for (int num_rows_a = 1; num_rows_a < 10; ++num_rows_a) {
+    for (int num_cols_a = 1; num_cols_a < 10; ++num_cols_a) {
+      Matrix A(num_rows_a, num_cols_a);
+      A.setRandom();
+
+      Vector b(num_rows_a);
+      b.setRandom();
+
+      Vector c(num_cols_a);
+      c.setOnes();
+
+      Vector c_plus = c;
+      Vector c_minus = c;
+      Vector c_assign = c;
+
+      Vector c_plus_ref = c;
+      Vector c_minus_ref = c;
+      Vector c_assign_ref = c;
+
+      c_plus_ref += A.transpose() * b;
+      MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
+          A.data(), num_rows_a, num_cols_a,
+          b.data(),
+          c_plus.data());
+      EXPECT_NEAR((c_plus_ref - c_plus).norm(), 0.0, kTolerance)
+          << "c += A' * b \n"
+          << "c_ref : \n" << c_plus_ref << "\n"
+          << "c: \n" << c_plus;
+
+      c_minus_ref -= A.transpose() * b;
+      MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, -1>(
+          A.data(), num_rows_a, num_cols_a,
+          b.data(),
+          c_minus.data());
+      EXPECT_NEAR((c_minus_ref - c_minus).norm(), 0.0, kTolerance)
+          << "c += A' * b \n"
+          << "c_ref : \n" << c_minus_ref << "\n"
+          << "c: \n" << c_minus;
+
+      c_assign_ref = A.transpose() * b;
+      MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 0>(
+          A.data(), num_rows_a, num_cols_a,
+          b.data(),
+          c_assign.data());
+      EXPECT_NEAR((c_assign_ref - c_assign).norm(), 0.0, kTolerance)
+          << "c += A' * b \n"
+          << "c_ref : \n" << c_assign_ref << "\n"
+          << "c: \n" << c_assign;
+    }
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/solver.cc b/internal/ceres/solver.cc
new file mode 100644
index 0000000..f8ad2c9
--- /dev/null
+++ b/internal/ceres/solver.cc
@@ -0,0 +1,838 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//         sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/solver.h"
+
+#include <algorithm>
+#include <memory>
+#include <sstream>  // NOLINT
+#include <vector>
+
+#include "ceres/casts.h"
+#include "ceres/context.h"
+#include "ceres/context_impl.h"
+#include "ceres/detect_structure.h"
+#include "ceres/gradient_checking_cost_function.h"
+#include "ceres/internal/port.h"
+#include "ceres/parameter_block_ordering.h"
+#include "ceres/preprocessor.h"
+#include "ceres/problem.h"
+#include "ceres/problem_impl.h"
+#include "ceres/program.h"
+#include "ceres/schur_templates.h"
+#include "ceres/solver_utils.h"
+#include "ceres/stringprintf.h"
+#include "ceres/types.h"
+#include "ceres/wall_time.h"
+
+namespace ceres {
+namespace {
+
+using std::map;
+using std::string;
+using std::vector;
+using internal::StringAppendF;
+using internal::StringPrintf;
+
+#define OPTION_OP(x, y, OP)                                             \
+  if (!(options.x OP y)) {                                              \
+    std::stringstream ss;                                               \
+    ss << "Invalid configuration. ";                                    \
+    ss << string("Solver::Options::" #x " = ") << options.x << ". ";    \
+    ss << "Violated constraint: ";                                      \
+    ss << string("Solver::Options::" #x " " #OP " "#y);                 \
+    *error = ss.str();                                                  \
+    return false;                                                       \
+  }
+
+#define OPTION_OP_OPTION(x, y, OP)                                      \
+  if (!(options.x OP options.y)) {                                      \
+    std::stringstream ss;                                               \
+    ss << "Invalid configuration. ";                                    \
+    ss << string("Solver::Options::" #x " = ") << options.x << ". ";    \
+    ss << string("Solver::Options::" #y " = ") << options.y << ". ";    \
+    ss << "Violated constraint: ";                                      \
+    ss << string("Solver::Options::" #x);                               \
+    ss << string(#OP " Solver::Options::" #y ".");                      \
+    *error = ss.str();                                                  \
+    return false;                                                       \
+  }
+
+#define OPTION_GE(x, y) OPTION_OP(x, y, >=);
+#define OPTION_GT(x, y) OPTION_OP(x, y, >);
+#define OPTION_LE(x, y) OPTION_OP(x, y, <=);
+#define OPTION_LT(x, y) OPTION_OP(x, y, <);
+#define OPTION_LE_OPTION(x, y) OPTION_OP_OPTION(x, y, <=)
+#define OPTION_LT_OPTION(x, y) OPTION_OP_OPTION(x, y, <)
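+
+// For example, OPTION_GE(max_num_iterations, 0) expands via OPTION_OP to a
+// check that options.max_num_iterations >= 0; if the check fails, a human
+// readable message is written to *error and the enclosing validation
+// function returns false.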
+
+bool CommonOptionsAreValid(const Solver::Options& options, string* error) {
+  OPTION_GE(max_num_iterations, 0);
+  OPTION_GE(max_solver_time_in_seconds, 0.0);
+  OPTION_GE(function_tolerance, 0.0);
+  OPTION_GE(gradient_tolerance, 0.0);
+  OPTION_GE(parameter_tolerance, 0.0);
+  OPTION_GT(num_threads, 0);
+  if (options.check_gradients) {
+    OPTION_GT(gradient_check_relative_precision, 0.0);
+    OPTION_GT(gradient_check_numeric_derivative_relative_step_size, 0.0);
+  }
+  return true;
+}
+
+bool TrustRegionOptionsAreValid(const Solver::Options& options, string* error) {
+  OPTION_GT(initial_trust_region_radius, 0.0);
+  OPTION_GT(min_trust_region_radius, 0.0);
+  OPTION_GT(max_trust_region_radius, 0.0);
+  OPTION_LE_OPTION(min_trust_region_radius, max_trust_region_radius);
+  OPTION_LE_OPTION(min_trust_region_radius, initial_trust_region_radius);
+  OPTION_LE_OPTION(initial_trust_region_radius, max_trust_region_radius);
+  OPTION_GE(min_relative_decrease, 0.0);
+  OPTION_GE(min_lm_diagonal, 0.0);
+  OPTION_GE(max_lm_diagonal, 0.0);
+  OPTION_LE_OPTION(min_lm_diagonal, max_lm_diagonal);
+  OPTION_GE(max_num_consecutive_invalid_steps, 0);
+  OPTION_GT(eta, 0.0);
+  OPTION_GE(min_linear_solver_iterations, 0);
+  OPTION_GE(max_linear_solver_iterations, 1);
+  OPTION_LE_OPTION(min_linear_solver_iterations, max_linear_solver_iterations);
+
+  if (options.use_inner_iterations) {
+    OPTION_GE(inner_iteration_tolerance, 0.0);
+  }
+
+  if (options.use_inner_iterations &&
+      options.evaluation_callback != NULL) {
+    *error =  "Inner iterations (use_inner_iterations = true) can't be "
+        "combined with an evaluation callback "
+        "(options.evaluation_callback != NULL).";
+    return false;
+  }
+
+  if (options.use_nonmonotonic_steps) {
+    OPTION_GT(max_consecutive_nonmonotonic_steps, 0);
+  }
+
+  if (options.linear_solver_type == ITERATIVE_SCHUR &&
+      options.use_explicit_schur_complement &&
+      options.preconditioner_type != SCHUR_JACOBI) {
+    *error =  "use_explicit_schur_complement only supports "
+        "SCHUR_JACOBI as the preconditioner.";
+    return false;
+  }
+
+  if (options.dense_linear_algebra_library_type == LAPACK &&
+      !IsDenseLinearAlgebraLibraryTypeAvailable(LAPACK) &&
+      (options.linear_solver_type == DENSE_NORMAL_CHOLESKY ||
+       options.linear_solver_type == DENSE_QR ||
+       options.linear_solver_type == DENSE_SCHUR)) {
+    *error = StringPrintf(
+        "Can't use %s with "
+        "Solver::Options::dense_linear_algebra_library_type = LAPACK "
+        "because LAPACK was not enabled when Ceres was built.",
+        LinearSolverTypeToString(options.linear_solver_type));
+    return false;
+  }
+
+  if (options.sparse_linear_algebra_library_type == NO_SPARSE) {
+    const char* error_template =
+        "Can't use %s with "
+        "Solver::Options::sparse_linear_algebra_library_type = NO_SPARSE.";
+    const char* name = nullptr;
+
+    if (options.linear_solver_type == SPARSE_NORMAL_CHOLESKY ||
+        options.linear_solver_type == SPARSE_SCHUR) {
+      name = LinearSolverTypeToString(options.linear_solver_type);
+    } else if (options.linear_solver_type == ITERATIVE_SCHUR &&
+               (options.preconditioner_type == CLUSTER_JACOBI ||
+                options.preconditioner_type == CLUSTER_TRIDIAGONAL)) {
+      name = PreconditionerTypeToString(options.preconditioner_type);
+    }
+
+    if (name != nullptr) {
+      *error = StringPrintf(error_template, name);
+      return false;
+    }
+  } else if (!IsSparseLinearAlgebraLibraryTypeAvailable(
+                 options.sparse_linear_algebra_library_type)) {
+    const char* error_template =
+        "Can't use %s with "
+        "Solver::Options::sparse_linear_algebra_library_type = %s, "
+        "because support was not enabled when Ceres Solver was built.";
+    const char* name = nullptr;
+    if (options.linear_solver_type == SPARSE_NORMAL_CHOLESKY ||
+        options.linear_solver_type == SPARSE_SCHUR) {
+      name = LinearSolverTypeToString(options.linear_solver_type);
+    } else if (options.linear_solver_type == ITERATIVE_SCHUR &&
+               (options.preconditioner_type == CLUSTER_JACOBI ||
+                options.preconditioner_type == CLUSTER_TRIDIAGONAL)) {
+      name = PreconditionerTypeToString(options.preconditioner_type);
+    }
+
+    if (name != nullptr) {
+      *error = StringPrintf(error_template,
+                            name,
+                            SparseLinearAlgebraLibraryTypeToString(
+                                options.sparse_linear_algebra_library_type));
+      return false;
+    }
+  }
+
+  if (options.trust_region_strategy_type == DOGLEG) {
+    if (options.linear_solver_type == ITERATIVE_SCHUR ||
+        options.linear_solver_type == CGNR) {
+      *error = "DOGLEG only supports exact factorization based linear "
+          "solvers. If you want to use an iterative solver please "
+          "use LEVENBERG_MARQUARDT as the trust_region_strategy_type";
+      return false;
+    }
+  }
+
+  if (options.trust_region_minimizer_iterations_to_dump.size() > 0 &&
+      options.trust_region_problem_dump_format_type != CONSOLE &&
+      options.trust_region_problem_dump_directory.empty()) {
+    *error = "Solver::Options::trust_region_problem_dump_directory is empty.";
+    return false;
+  }
+
+  if (options.dynamic_sparsity &&
+      options.linear_solver_type != SPARSE_NORMAL_CHOLESKY) {
+    *error = "Dynamic sparsity is only supported with SPARSE_NORMAL_CHOLESKY.";
+    return false;
+  }
+
+  return true;
+}
+
+bool LineSearchOptionsAreValid(const Solver::Options& options, string* error) {
+  OPTION_GT(max_lbfgs_rank, 0);
+  OPTION_GT(min_line_search_step_size, 0.0);
+  OPTION_GT(max_line_search_step_contraction, 0.0);
+  OPTION_LT(max_line_search_step_contraction, 1.0);
+  OPTION_LT_OPTION(max_line_search_step_contraction,
+                   min_line_search_step_contraction);
+  OPTION_LE(min_line_search_step_contraction, 1.0);
+  OPTION_GT(max_num_line_search_step_size_iterations, 0);
+  OPTION_GT(line_search_sufficient_function_decrease, 0.0);
+  OPTION_LT_OPTION(line_search_sufficient_function_decrease,
+                   line_search_sufficient_curvature_decrease);
+  OPTION_LT(line_search_sufficient_curvature_decrease, 1.0);
+  OPTION_GT(max_line_search_step_expansion, 1.0);
+
+  if ((options.line_search_direction_type == ceres::BFGS ||
+       options.line_search_direction_type == ceres::LBFGS) &&
+      options.line_search_type != ceres::WOLFE) {
+    *error =
+        string("Invalid configuration: Solver::Options::line_search_type = ")
+        + string(LineSearchTypeToString(options.line_search_type))
+        + string(". When using (L)BFGS, "
+                 "Solver::Options::line_search_type must be set to WOLFE.");
+    return false;
+  }
+
+  // Warn if the user has requested BISECTION interpolation, but the
+  // constraints on the max/min step size contraction prevent the bisection
+  // scale factor (0.5) from ever being used. This is likely a user mistake,
+  // but it does not prevent us from continuing, so warn only.
+  LOG_IF(WARNING,
+         (options.line_search_interpolation_type == ceres::BISECTION &&
+          (options.max_line_search_step_contraction > 0.5 ||
+           options.min_line_search_step_contraction < 0.5)))
+      << "Line search interpolation type is BISECTION, but specified "
+      << "max_line_search_step_contraction: "
+      << options.max_line_search_step_contraction << ", and "
+      << "min_line_search_step_contraction: "
+      << options.min_line_search_step_contraction
+      << ", prevent bisection (0.5) scaling, continuing with solve regardless.";
+
+  return true;
+}
+
+#undef OPTION_OP
+#undef OPTION_OP_OPTION
+#undef OPTION_GT
+#undef OPTION_GE
+#undef OPTION_LE
+#undef OPTION_LT
+#undef OPTION_LE_OPTION
+#undef OPTION_LT_OPTION
+
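+// Converts an ordering, given as a vector of group sizes, into a
+// comma-separated string. An empty ordering is reported as "AUTOMATIC".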
+void StringifyOrdering(const vector<int>& ordering, string* report) {
+  if (ordering.size() == 0) {
+    internal::StringAppendF(report, "AUTOMATIC");
+    return;
+  }
+
+  for (int i = 0; i < ordering.size() - 1; ++i) {
+    internal::StringAppendF(report, "%d,", ordering[i]);
+  }
+  internal::StringAppendF(report, "%d", ordering.back());
+}
+
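+// Records the sizes of the given (original) program in the summary.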
+void SummarizeGivenProgram(const internal::Program& program,
+                           Solver::Summary* summary) {
+  summary->num_parameter_blocks     = program.NumParameterBlocks();
+  summary->num_parameters           = program.NumParameters();
+  summary->num_effective_parameters = program.NumEffectiveParameters();
+  summary->num_residual_blocks      = program.NumResidualBlocks();
+  summary->num_residuals            = program.NumResiduals();
+}
+
+void SummarizeReducedProgram(const internal::Program& program,
+                             Solver::Summary* summary) {
+  summary->num_parameter_blocks_reduced     = program.NumParameterBlocks();
+  summary->num_parameters_reduced           = program.NumParameters();
+  summary->num_effective_parameters_reduced = program.NumEffectiveParameters();
+  summary->num_residual_blocks_reduced      = program.NumResidualBlocks();
+  summary->num_residuals_reduced            = program.NumResiduals();
+}
+
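+// Records the solver options and the given orderings in the summary
+// before the solve starts.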
+void PreSolveSummarize(const Solver::Options& options,
+                       const internal::ProblemImpl* problem,
+                       Solver::Summary* summary) {
+  SummarizeGivenProgram(problem->program(), summary);
+  internal::OrderingToGroupSizes(options.linear_solver_ordering.get(),
+                                 &(summary->linear_solver_ordering_given));
+  internal::OrderingToGroupSizes(options.inner_iteration_ordering.get(),
+                                 &(summary->inner_iteration_ordering_given));
+
+  summary->dense_linear_algebra_library_type  = options.dense_linear_algebra_library_type;  //  NOLINT
+  summary->dogleg_type                        = options.dogleg_type;
+  summary->inner_iteration_time_in_seconds    = 0.0;
+  summary->num_line_search_steps              = 0;
+  summary->line_search_cost_evaluation_time_in_seconds = 0.0;
+  summary->line_search_gradient_evaluation_time_in_seconds = 0.0;
+  summary->line_search_polynomial_minimization_time_in_seconds = 0.0;
+  summary->line_search_total_time_in_seconds  = 0.0;
+  summary->inner_iterations_given             = options.use_inner_iterations;
+  summary->line_search_direction_type         = options.line_search_direction_type;         //  NOLINT
+  summary->line_search_interpolation_type     = options.line_search_interpolation_type;     //  NOLINT
+  summary->line_search_type                   = options.line_search_type;
+  summary->linear_solver_type_given           = options.linear_solver_type;
+  summary->max_lbfgs_rank                     = options.max_lbfgs_rank;
+  summary->minimizer_type                     = options.minimizer_type;
+  summary->nonlinear_conjugate_gradient_type  = options.nonlinear_conjugate_gradient_type;  //  NOLINT
+  summary->num_threads_given                  = options.num_threads;
+  summary->preconditioner_type_given          = options.preconditioner_type;
+  summary->sparse_linear_algebra_library_type = options.sparse_linear_algebra_library_type; //  NOLINT
+  summary->trust_region_strategy_type         = options.trust_region_strategy_type;         //  NOLINT
+  summary->visibility_clustering_type         = options.visibility_clustering_type;         //  NOLINT
+}
+
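+// Records the orderings, options and call statistics (evaluator and
+// linear solver) that were actually used during the solve.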
+void PostSolveSummarize(const internal::PreprocessedProblem& pp,
+                        Solver::Summary* summary) {
+  internal::OrderingToGroupSizes(pp.options.linear_solver_ordering.get(),
+                                 &(summary->linear_solver_ordering_used));
+  internal::OrderingToGroupSizes(pp.options.inner_iteration_ordering.get(),
+                                 &(summary->inner_iteration_ordering_used));
+
+  summary->inner_iterations_used          = pp.inner_iteration_minimizer.get() != NULL;     // NOLINT
+  summary->linear_solver_type_used        = pp.linear_solver_options.type;
+  summary->num_threads_used               = pp.options.num_threads;
+  summary->preconditioner_type_used       = pp.options.preconditioner_type;
+
+  internal::SetSummaryFinalCost(summary);
+
+  if (pp.reduced_program.get() != NULL) {
+    SummarizeReducedProgram(*pp.reduced_program, summary);
+  }
+
+  using internal::CallStatistics;
+
+  // It is possible that no evaluator was created. This would be the
+  // case if the preprocessor failed, or if the reduced problem did
+  // not contain any parameter blocks. Thus, only extract the
+  // evaluator statistics if one exists.
+  if (pp.evaluator.get() != NULL) {
+    const map<string, CallStatistics>& evaluator_statistics =
+        pp.evaluator->Statistics();
+    {
+      const CallStatistics& call_stats = FindWithDefault(
+          evaluator_statistics, "Evaluator::Residual", CallStatistics());
+
+      summary->residual_evaluation_time_in_seconds = call_stats.time;
+      summary->num_residual_evaluations = call_stats.calls;
+    }
+    {
+      const CallStatistics& call_stats = FindWithDefault(
+          evaluator_statistics, "Evaluator::Jacobian", CallStatistics());
+
+      summary->jacobian_evaluation_time_in_seconds = call_stats.time;
+      summary->num_jacobian_evaluations = call_stats.calls;
+    }
+  }
+
+  // Again, like the evaluator, there may or may not be a linear
+  // solver from which we can extract run time statistics. In
+  // particular the line search solver does not use a linear solver.
+  if (pp.linear_solver.get() != NULL) {
+    const map<string, CallStatistics>& linear_solver_statistics =
+        pp.linear_solver->Statistics();
+    const CallStatistics& call_stats = FindWithDefault(
+        linear_solver_statistics, "LinearSolver::Solve", CallStatistics());
+    summary->num_linear_solves = call_stats.calls;
+    summary->linear_solver_time_in_seconds = call_stats.time;
+  }
+}
+
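+// Runs the minimizer on the reduced program. If the reduced program
+// contains no parameter blocks, convergence is declared immediately.
+// If the solution is not usable, the original parameter values are
+// restored.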
+void Minimize(internal::PreprocessedProblem* pp,
+              Solver::Summary* summary) {
+  using internal::Program;
+  using internal::Minimizer;
+
+  Program* program = pp->reduced_program.get();
+  if (pp->reduced_program->NumParameterBlocks() == 0) {
+    summary->message = "Function tolerance reached. "
+        "No non-constant parameter blocks found.";
+    summary->termination_type = CONVERGENCE;
+    VLOG_IF(1, pp->options.logging_type != SILENT) << summary->message;
+    summary->initial_cost = summary->fixed_cost;
+    summary->final_cost = summary->fixed_cost;
+    return;
+  }
+
+  const Vector original_reduced_parameters = pp->reduced_parameters;
+  std::unique_ptr<Minimizer> minimizer(
+      Minimizer::Create(pp->options.minimizer_type));
+  minimizer->Minimize(pp->minimizer_options,
+                      pp->reduced_parameters.data(),
+                      summary);
+
+  program->StateVectorToParameterBlocks(
+      summary->IsSolutionUsable()
+      ? pp->reduced_parameters.data()
+      : original_reduced_parameters.data());
+  program->CopyParameterBlockStateToUserState();
+}
+
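+// Formats the detected Schur structure as "row,e,f" block sizes, with
+// "d" denoting a dynamically sized block.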
+std::string SchurStructureToString(const int row_block_size,
+                                   const int e_block_size,
+                                   const int f_block_size) {
+  const std::string row =
+      (row_block_size == Eigen::Dynamic)
+      ? "d" : internal::StringPrintf("%d", row_block_size);
+
+  const std::string e =
+      (e_block_size == Eigen::Dynamic)
+      ? "d" : internal::StringPrintf("%d", e_block_size);
+
+  const std::string f =
+      (f_block_size == Eigen::Dynamic)
+      ? "d" : internal::StringPrintf("%d", f_block_size);
+
+  return internal::StringPrintf("%s,%s,%s", row.c_str(), e.c_str(), f.c_str());
+}
+
+}  // namespace
+
+bool Solver::Options::IsValid(string* error) const {
+  if (!CommonOptionsAreValid(*this, error)) {
+    return false;
+  }
+
+  if (minimizer_type == TRUST_REGION &&
+      !TrustRegionOptionsAreValid(*this, error)) {
+    return false;
+  }
+
+  // We do not know whether the problem is bounds constrained. If it is,
+  // the trust region solver will also use the line search solver to
+  // project onto the box constraints, so the line search options must be
+  // checked regardless of which minimizer algorithm is being used.
+  return LineSearchOptionsAreValid(*this, error);
+}
+
+Solver::~Solver() {}
+
+void Solver::Solve(const Solver::Options& options,
+                   Problem* problem,
+                   Solver::Summary* summary) {
+  using internal::PreprocessedProblem;
+  using internal::Preprocessor;
+  using internal::ProblemImpl;
+  using internal::Program;
+  using internal::WallTimeInSeconds;
+
+  CHECK(problem != nullptr);
+  CHECK(summary != nullptr);
+
+  double start_time = WallTimeInSeconds();
+  *summary = Summary();
+  if (!options.IsValid(&summary->message)) {
+    LOG(ERROR) << "Terminating: " << summary->message;
+    return;
+  }
+
+  ProblemImpl* problem_impl = problem->problem_impl_.get();
+  Program* program = problem_impl->mutable_program();
+  PreSolveSummarize(options, problem_impl, summary);
+
+  // If gradient_checking is enabled, wrap all cost functions in a
+  // gradient checker and install a callback that terminates if any gradient
+  // error is detected.
+  std::unique_ptr<internal::ProblemImpl> gradient_checking_problem;
+  internal::GradientCheckingIterationCallback gradient_checking_callback;
+  Solver::Options modified_options = options;
+  if (options.check_gradients) {
+    modified_options.callbacks.push_back(&gradient_checking_callback);
+    gradient_checking_problem.reset(
+        CreateGradientCheckingProblemImpl(
+            problem_impl,
+            options.gradient_check_numeric_derivative_relative_step_size,
+            options.gradient_check_relative_precision,
+            &gradient_checking_callback));
+    problem_impl = gradient_checking_problem.get();
+    program = problem_impl->mutable_program();
+  }
+
+  // Make sure that all the parameter blocks states are set to the
+  // values provided by the user.
+  program->SetParameterBlockStatePtrsToUserStatePtrs();
+
+  // The main thread also does work so we only need to launch num_threads - 1.
+  problem_impl->context()->EnsureMinimumThreads(options.num_threads - 1);
+
+  std::unique_ptr<Preprocessor> preprocessor(
+      Preprocessor::Create(modified_options.minimizer_type));
+  PreprocessedProblem pp;
+
+  const bool status =
+      preprocessor->Preprocess(modified_options, problem_impl, &pp);
+
+  // We check linear_solver_options.type rather than
+  // modified_options.linear_solver_type because the preprocessor may
+  // change the linear solver type if the problem lacks a Schur structure.
+  if (IsSchurType(pp.linear_solver_options.type)) {
+    // TODO(sameeragarwal): We can likely eliminate the duplicate call
+    // to DetectStructure here and inside the linear solver, by
+    // calling this in the preprocessor.
+    int row_block_size;
+    int e_block_size;
+    int f_block_size;
+    DetectStructure(*static_cast<internal::BlockSparseMatrix*>(
+                        pp.minimizer_options.jacobian.get())
+                    ->block_structure(),
+                    pp.linear_solver_options.elimination_groups[0],
+                    &row_block_size,
+                    &e_block_size,
+                    &f_block_size);
+    summary->schur_structure_given =
+        SchurStructureToString(row_block_size, e_block_size, f_block_size);
+    internal::GetBestSchurTemplateSpecialization(&row_block_size,
+                                                 &e_block_size,
+                                                 &f_block_size);
+    summary->schur_structure_used =
+        SchurStructureToString(row_block_size, e_block_size, f_block_size);
+  }
+
+  summary->fixed_cost = pp.fixed_cost;
+  summary->preprocessor_time_in_seconds = WallTimeInSeconds() - start_time;
+
+  if (status) {
+    const double minimizer_start_time = WallTimeInSeconds();
+    Minimize(&pp, summary);
+    summary->minimizer_time_in_seconds =
+        WallTimeInSeconds() - minimizer_start_time;
+  } else {
+    summary->message = pp.error;
+  }
+
+  const double postprocessor_start_time = WallTimeInSeconds();
+  problem_impl = problem->problem_impl_.get();
+  program = problem_impl->mutable_program();
+  // On exit, ensure that the parameter blocks again point at the user
+  // provided values and the parameter blocks are numbered according
+  // to their position in the original user provided program.
+  program->SetParameterBlockStatePtrsToUserStatePtrs();
+  program->SetParameterOffsetsAndIndex();
+  PostSolveSummarize(pp, summary);
+  summary->postprocessor_time_in_seconds =
+      WallTimeInSeconds() - postprocessor_start_time;
+
+  // If the gradient checker reported an error, we want to report FAILURE
+  // instead of USER_FAILURE and provide the error log.
+  if (gradient_checking_callback.gradient_error_detected()) {
+    summary->termination_type = FAILURE;
+    summary->message = gradient_checking_callback.error_log();
+  }
+
+  summary->total_time_in_seconds = WallTimeInSeconds() - start_time;
+}
+
+void Solve(const Solver::Options& options,
+           Problem* problem,
+           Solver::Summary* summary) {
+  Solver solver;
+  solver.Solve(options, problem, summary);
+}
+
+string Solver::Summary::BriefReport() const {
+  return StringPrintf("Ceres Solver Report: "
+                      "Iterations: %d, "
+                      "Initial cost: %e, "
+                      "Final cost: %e, "
+                      "Termination: %s",
+                      num_successful_steps + num_unsuccessful_steps,
+                      initial_cost,
+                      final_cost,
+                      TerminationTypeToString(termination_type));
+}
+
+string Solver::Summary::FullReport() const {
+  using internal::VersionString;
+
+  string report = string("\nSolver Summary (v " + VersionString() + ")\n\n");
+
+  StringAppendF(&report, "%45s    %21s\n", "Original", "Reduced");
+  StringAppendF(&report, "Parameter blocks    % 25d% 25d\n",
+                num_parameter_blocks, num_parameter_blocks_reduced);
+  StringAppendF(&report, "Parameters          % 25d% 25d\n",
+                num_parameters, num_parameters_reduced);
+  if (num_effective_parameters_reduced != num_parameters_reduced) {
+    StringAppendF(&report, "Effective parameters% 25d% 25d\n",
+                  num_effective_parameters, num_effective_parameters_reduced);
+  }
+  StringAppendF(&report, "Residual blocks     % 25d% 25d\n",
+                num_residual_blocks, num_residual_blocks_reduced);
+  StringAppendF(&report, "Residuals           % 25d% 25d\n",
+                num_residuals, num_residuals_reduced);
+
+  if (minimizer_type == TRUST_REGION) {
+    // TRUST_REGION HEADER
+    StringAppendF(&report, "\nMinimizer                 %19s\n",
+                  "TRUST_REGION");
+
+    if (linear_solver_type_used == DENSE_NORMAL_CHOLESKY ||
+        linear_solver_type_used == DENSE_SCHUR ||
+        linear_solver_type_used == DENSE_QR) {
+      StringAppendF(&report, "\nDense linear algebra library  %15s\n",
+                    DenseLinearAlgebraLibraryTypeToString(
+                        dense_linear_algebra_library_type));
+    }
+
+    if (linear_solver_type_used == SPARSE_NORMAL_CHOLESKY ||
+        linear_solver_type_used == SPARSE_SCHUR ||
+        (linear_solver_type_used == ITERATIVE_SCHUR &&
+         (preconditioner_type_used == CLUSTER_JACOBI ||
+          preconditioner_type_used == CLUSTER_TRIDIAGONAL))) {
+      StringAppendF(&report, "\nSparse linear algebra library %15s\n",
+                    SparseLinearAlgebraLibraryTypeToString(
+                        sparse_linear_algebra_library_type));
+    }
+
+    StringAppendF(&report, "Trust region strategy     %19s",
+                  TrustRegionStrategyTypeToString(
+                      trust_region_strategy_type));
+    if (trust_region_strategy_type == DOGLEG) {
+      if (dogleg_type == TRADITIONAL_DOGLEG) {
+        StringAppendF(&report, " (TRADITIONAL)");
+      } else {
+        StringAppendF(&report, " (SUBSPACE)");
+      }
+    }
+    StringAppendF(&report, "\n");
+    StringAppendF(&report, "\n");
+
+    StringAppendF(&report, "%45s    %21s\n", "Given",  "Used");
+    StringAppendF(&report, "Linear solver       %25s%25s\n",
+                  LinearSolverTypeToString(linear_solver_type_given),
+                  LinearSolverTypeToString(linear_solver_type_used));
+
+    if (linear_solver_type_given == CGNR ||
+        linear_solver_type_given == ITERATIVE_SCHUR) {
+      StringAppendF(&report, "Preconditioner      %25s%25s\n",
+                    PreconditionerTypeToString(preconditioner_type_given),
+                    PreconditionerTypeToString(preconditioner_type_used));
+    }
+
+    if (preconditioner_type_used == CLUSTER_JACOBI ||
+        preconditioner_type_used == CLUSTER_TRIDIAGONAL) {
+      StringAppendF(&report, "Visibility clustering%24s%25s\n",
+                    VisibilityClusteringTypeToString(
+                        visibility_clustering_type),
+                    VisibilityClusteringTypeToString(
+                        visibility_clustering_type));
+    }
+    StringAppendF(&report, "Threads             % 25d% 25d\n",
+                  num_threads_given, num_threads_used);
+
+    string given;
+    StringifyOrdering(linear_solver_ordering_given, &given);
+    string used;
+    StringifyOrdering(linear_solver_ordering_used, &used);
+    StringAppendF(&report,
+                  "Linear solver ordering %22s %24s\n",
+                  given.c_str(),
+                  used.c_str());
+    if (IsSchurType(linear_solver_type_used)) {
+      StringAppendF(&report,
+                    "Schur structure        %22s %24s\n",
+                    schur_structure_given.c_str(),
+                    schur_structure_used.c_str());
+    }
+
+    if (inner_iterations_given) {
+      StringAppendF(&report,
+                    "Use inner iterations     %20s     %20s\n",
+                    inner_iterations_given ? "True" : "False",
+                    inner_iterations_used ? "True" : "False");
+    }
+
+    if (inner_iterations_used) {
+      string given;
+      StringifyOrdering(inner_iteration_ordering_given, &given);
+      string used;
+      StringifyOrdering(inner_iteration_ordering_used, &used);
+      StringAppendF(&report,
+                    "Inner iteration ordering %20s %24s\n",
+                    given.c_str(),
+                    used.c_str());
+    }
+  } else {
+    // LINE_SEARCH HEADER
+    StringAppendF(&report, "\nMinimizer                 %19s\n", "LINE_SEARCH");
+
+    string line_search_direction_string;
+    if (line_search_direction_type == LBFGS) {
+      line_search_direction_string = StringPrintf("LBFGS (%d)", max_lbfgs_rank);
+    } else if (line_search_direction_type == NONLINEAR_CONJUGATE_GRADIENT) {
+      line_search_direction_string =
+          NonlinearConjugateGradientTypeToString(
+              nonlinear_conjugate_gradient_type);
+    } else {
+      line_search_direction_string =
+          LineSearchDirectionTypeToString(line_search_direction_type);
+    }
+
+    StringAppendF(&report, "Line search direction     %19s\n",
+                  line_search_direction_string.c_str());
+
+    const string line_search_type_string =
+        StringPrintf("%s %s",
+                     LineSearchInterpolationTypeToString(
+                         line_search_interpolation_type),
+                     LineSearchTypeToString(line_search_type));
+    StringAppendF(&report, "Line search type          %19s\n",
+                  line_search_type_string.c_str());
+    StringAppendF(&report, "\n");
+
+    StringAppendF(&report, "%45s    %21s\n", "Given",  "Used");
+    StringAppendF(&report, "Threads             % 25d% 25d\n",
+                  num_threads_given, num_threads_used);
+  }
+
+  StringAppendF(&report, "\nCost:\n");
+  StringAppendF(&report, "Initial        % 30e\n", initial_cost);
+  if (termination_type != FAILURE &&
+      termination_type != USER_FAILURE) {
+    StringAppendF(&report, "Final          % 30e\n", final_cost);
+    StringAppendF(&report, "Change         % 30e\n",
+                  initial_cost - final_cost);
+  }
+
+  StringAppendF(&report, "\nMinimizer iterations         % 16d\n",
+                num_successful_steps + num_unsuccessful_steps);
+
+  // Successful/Unsuccessful steps only matter in the case of the
+  // trust region solver. Line search terminates when it encounters
+  // the first unsuccessful step.
+  if (minimizer_type == TRUST_REGION) {
+    StringAppendF(&report, "Successful steps               % 14d\n",
+                  num_successful_steps);
+    StringAppendF(&report, "Unsuccessful steps             % 14d\n",
+                  num_unsuccessful_steps);
+  }
+  if (inner_iterations_used) {
+    StringAppendF(&report, "Steps with inner iterations    % 14d\n",
+                  num_inner_iteration_steps);
+  }
+
+  const bool line_search_used =
+      (minimizer_type == LINE_SEARCH ||
+       (minimizer_type == TRUST_REGION && is_constrained));
+
+  if (line_search_used) {
+    StringAppendF(&report, "Line search steps              % 14d\n",
+                  num_line_search_steps);
+  }
+
+  StringAppendF(&report, "\nTime (in seconds):\n");
+  StringAppendF(&report, "Preprocessor        %25.6f\n",
+                preprocessor_time_in_seconds);
+
+  StringAppendF(&report, "\n  Residual only evaluation %18.6f (%d)\n",
+                residual_evaluation_time_in_seconds, num_residual_evaluations);
+  if (line_search_used) {
+    StringAppendF(&report, "    Line search cost evaluation    %10.6f\n",
+                  line_search_cost_evaluation_time_in_seconds);
+  }
+  StringAppendF(&report, "  Jacobian & residual evaluation %12.6f (%d)\n",
+                jacobian_evaluation_time_in_seconds, num_jacobian_evaluations);
+  if (line_search_used) {
+    StringAppendF(&report, "    Line search gradient evaluation   %6.6f\n",
+                  line_search_gradient_evaluation_time_in_seconds);
+  }
+
+  if (minimizer_type == TRUST_REGION) {
+    StringAppendF(&report, "  Linear solver       %23.6f (%d)\n",
+                  linear_solver_time_in_seconds, num_linear_solves);
+  }
+
+  if (inner_iterations_used) {
+    StringAppendF(&report, "  Inner iterations    %23.6f\n",
+                  inner_iteration_time_in_seconds);
+  }
+
+  if (line_search_used) {
+    StringAppendF(&report, "  Line search polynomial minimization  %.6f\n",
+                  line_search_polynomial_minimization_time_in_seconds);
+  }
+
+  StringAppendF(&report, "Minimizer           %25.6f\n\n",
+                minimizer_time_in_seconds);
+
+  StringAppendF(&report, "Postprocessor        %24.6f\n",
+                postprocessor_time_in_seconds);
+
+  StringAppendF(&report, "Total               %25.6f\n\n",
+                total_time_in_seconds);
+
+  StringAppendF(&report, "Termination:        %25s (%s)\n",
+                TerminationTypeToString(termination_type), message.c_str());
+  return report;
+}
+
+bool Solver::Summary::IsSolutionUsable() const {
+  return internal::IsSolutionUsable(*this);
+}
+
+}  // namespace ceres
diff --git a/internal/ceres/solver_test.cc b/internal/ceres/solver_test.cc
new file mode 100644
index 0000000..6acae0b
--- /dev/null
+++ b/internal/ceres/solver_test.cc
@@ -0,0 +1,487 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/solver.h"
+
+#include <cmath>
+#include <limits>
+#include <memory>
+#include <vector>
+#include "gtest/gtest.h"
+#include "ceres/evaluation_callback.h"
+#include "ceres/autodiff_cost_function.h"
+#include "ceres/sized_cost_function.h"
+#include "ceres/problem.h"
+#include "ceres/problem_impl.h"
+
+namespace ceres {
+namespace internal {
+
+using std::string;
+
+TEST(SolverOptions, DefaultTrustRegionOptionsAreValid) {
+  Solver::Options options;
+  options.minimizer_type = TRUST_REGION;
+  string error;
+  EXPECT_TRUE(options.IsValid(&error)) << error;
+}
+
+TEST(SolverOptions, DefaultLineSearchOptionsAreValid) {
+  Solver::Options options;
+  options.minimizer_type = LINE_SEARCH;
+  string error;
+  EXPECT_TRUE(options.IsValid(&error)) << error;
+}
+
+struct QuadraticCostFunctor {
+  template <typename T> bool operator()(const T* const x,
+                                        T* residual) const {
+    residual[0] = T(5.0) - *x;
+    return true;
+  }
+
+  static CostFunction* Create() {
+    return new AutoDiffCostFunction<QuadraticCostFunctor, 1, 1>(
+        new QuadraticCostFunctor);
+  }
+};
+
+struct RememberingCallback : public IterationCallback {
+  explicit RememberingCallback(double *x) : calls(0), x(x) {}
+  virtual ~RememberingCallback() {}
+  virtual CallbackReturnType operator()(const IterationSummary& summary) {
+    x_values.push_back(*x);
+    return SOLVER_CONTINUE;
+  }
+  int calls;
+  double *x;
+  std::vector<double> x_values;
+};
+
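+// An EvaluationCallback that does nothing; used by the tests below to
+// exercise the interaction between evaluation callbacks and other options.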
+struct NoOpEvaluationCallback : EvaluationCallback {
+  virtual ~NoOpEvaluationCallback() {}
+  virtual void PrepareForEvaluation(bool evaluate_jacobians,
+                                    bool new_evaluation_point) {
+    (void) evaluate_jacobians;
+    (void) new_evaluation_point;
+  }
+};
+
+TEST(Solver, UpdateStateEveryIterationOption) {
+  double x = 50.0;
+  const double original_x = x;
+
+  std::unique_ptr<CostFunction> cost_function(QuadraticCostFunctor::Create());
+  Problem::Options problem_options;
+  problem_options.cost_function_ownership = DO_NOT_TAKE_OWNERSHIP;
+  Problem problem(problem_options);
+  problem.AddResidualBlock(cost_function.get(), NULL, &x);
+
+  Solver::Options options;
+  options.linear_solver_type = DENSE_QR;
+
+  RememberingCallback callback(&x);
+  options.callbacks.push_back(&callback);
+
+  Solver::Summary summary;
+
+  int num_iterations;
+
+  // There are four cases that need to be checked:
+  //
+  //   (update_state_every_iteration = true|false) X
+  //   (evaluation_callback = NULL|provided)
+  //
+  // These need to get checked since there is some interaction between them.
+
+  // First: update_state_every_iteration=false, evaluation_callback=NULL.
+  Solve(options, &problem, &summary);
+  num_iterations = summary.num_successful_steps +
+                   summary.num_unsuccessful_steps;
+  EXPECT_GT(num_iterations, 1);
+  for (int i = 0; i < callback.x_values.size(); ++i) {
+    EXPECT_EQ(50.0, callback.x_values[i]);
+  }
+
+  // Second: update_state_every_iteration=true, evaluation_callback=NULL.
+  x = 50.0;
+  options.update_state_every_iteration = true;
+  callback.x_values.clear();
+  Solve(options, &problem, &summary);
+  num_iterations = summary.num_successful_steps +
+                   summary.num_unsuccessful_steps;
+  EXPECT_GT(num_iterations, 1);
+  EXPECT_EQ(original_x, callback.x_values[0]);
+  EXPECT_NE(original_x, callback.x_values[1]);
+
+  NoOpEvaluationCallback evaluation_callback;
+
+  // Third: update_state_every_iteration=true, evaluation_callback=!NULL.
+  x = 50.0;
+  options.update_state_every_iteration = true;
+  options.evaluation_callback = &evaluation_callback;
+  callback.x_values.clear();
+  Solve(options, &problem, &summary);
+  num_iterations = summary.num_successful_steps +
+                   summary.num_unsuccessful_steps;
+  EXPECT_GT(num_iterations, 1);
+  EXPECT_EQ(original_x, callback.x_values[0]);
+  EXPECT_NE(original_x, callback.x_values[1]);
+
+  // Fourth: update_state_every_iteration=false, evaluation_callback=!NULL.
+  x = 50.0;
+  options.update_state_every_iteration = false;
+  options.evaluation_callback = &evaluation_callback;
+  callback.x_values.clear();
+  Solve(options, &problem, &summary);
+  num_iterations = summary.num_successful_steps +
+                   summary.num_unsuccessful_steps;
+  EXPECT_GT(num_iterations, 1);
+  EXPECT_EQ(original_x, callback.x_values[0]);
+  EXPECT_NE(original_x, callback.x_values[1]);
+}
+
+// The parameters must be in separate blocks so that they can be individually
+// set constant or not.
+struct Quadratic4DCostFunction {
+  template <typename T> bool operator()(const T* const x,
+                                        const T* const y,
+                                        const T* const z,
+                                        const T* const w,
+                                        T* residual) const {
+    // A 4-dimensional axis-aligned quadratic.
+    residual[0] = T(10.0) - *x +
+                  T(20.0) - *y +
+                  T(30.0) - *z +
+                  T(40.0) - *w;
+    return true;
+  }
+
+  static CostFunction* Create() {
+    return new AutoDiffCostFunction<Quadratic4DCostFunction, 1, 1, 1, 1, 1>(
+        new Quadratic4DCostFunction);
+  }
+};
+
+// A cost function that simply returns its argument.
+class UnaryIdentityCostFunction : public SizedCostFunction<1, 1> {
+ public:
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** jacobians) const {
+    residuals[0] = parameters[0][0];
+    if (jacobians != NULL && jacobians[0] != NULL) {
+      jacobians[0][0] = 1.0;
+    }
+    return true;
+  }
+};
+
+TEST(Solver, TrustRegionProblemHasNoParameterBlocks) {
+  Problem problem;
+  Solver::Options options;
+  options.minimizer_type = TRUST_REGION;
+  Solver::Summary summary;
+  Solve(options, &problem, &summary);
+  EXPECT_EQ(summary.termination_type, CONVERGENCE);
+  EXPECT_EQ(summary.message,
+            "Function tolerance reached. "
+            "No non-constant parameter blocks found.");
+}
+
+TEST(Solver, LineSearchProblemHasNoParameterBlocks) {
+  Problem problem;
+  Solver::Options options;
+  options.minimizer_type = LINE_SEARCH;
+  Solver::Summary summary;
+  Solve(options, &problem, &summary);
+  EXPECT_EQ(summary.termination_type, CONVERGENCE);
+  EXPECT_EQ(summary.message,
+            "Function tolerance reached. "
+            "No non-constant parameter blocks found.");
+}
+
+TEST(Solver, TrustRegionProblemHasZeroResiduals) {
+  Problem problem;
+  double x = 1;
+  problem.AddParameterBlock(&x, 1);
+  Solver::Options options;
+  options.minimizer_type = TRUST_REGION;
+  Solver::Summary summary;
+  Solve(options, &problem, &summary);
+  EXPECT_EQ(summary.termination_type, CONVERGENCE);
+  EXPECT_EQ(summary.message,
+            "Function tolerance reached. "
+            "No non-constant parameter blocks found.");
+}
+
+TEST(Solver, LineSearchProblemHasZeroResiduals) {
+  Problem problem;
+  double x = 1;
+  problem.AddParameterBlock(&x, 1);
+  Solver::Options options;
+  options.minimizer_type = LINE_SEARCH;
+  Solver::Summary summary;
+  Solve(options, &problem, &summary);
+  EXPECT_EQ(summary.termination_type, CONVERGENCE);
+  EXPECT_EQ(summary.message,
+            "Function tolerance reached. "
+            "No non-constant parameter blocks found.");
+}
+
+TEST(Solver, TrustRegionProblemIsConstant) {
+  Problem problem;
+  double x = 1;
+  problem.AddResidualBlock(new UnaryIdentityCostFunction, NULL, &x);
+  problem.SetParameterBlockConstant(&x);
+  Solver::Options options;
+  options.minimizer_type = TRUST_REGION;
+  Solver::Summary summary;
+  Solve(options, &problem, &summary);
+  EXPECT_EQ(summary.termination_type, CONVERGENCE);
+  EXPECT_EQ(summary.initial_cost, 1.0 / 2.0);
+  EXPECT_EQ(summary.final_cost, 1.0 / 2.0);
+}
+
+TEST(Solver, LineSearchProblemIsConstant) {
+  Problem problem;
+  double x = 1;
+  problem.AddResidualBlock(new UnaryIdentityCostFunction, NULL, &x);
+  problem.SetParameterBlockConstant(&x);
+  Solver::Options options;
+  options.minimizer_type = LINE_SEARCH;
+  Solver::Summary summary;
+  Solve(options, &problem, &summary);
+  EXPECT_EQ(summary.termination_type, CONVERGENCE);
+  EXPECT_EQ(summary.initial_cost, 1.0 / 2.0);
+  EXPECT_EQ(summary.final_cost, 1.0 / 2.0);
+}
+
+#if defined(CERES_NO_SUITESPARSE)
+TEST(Solver, SparseNormalCholeskyNoSuiteSparse) {
+  Solver::Options options;
+  options.sparse_linear_algebra_library_type = SUITE_SPARSE;
+  options.linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+  string message;
+  EXPECT_FALSE(options.IsValid(&message));
+}
+
+TEST(Solver, SparseSchurNoSuiteSparse) {
+  Solver::Options options;
+  options.sparse_linear_algebra_library_type = SUITE_SPARSE;
+  options.linear_solver_type = SPARSE_SCHUR;
+  string message;
+  EXPECT_FALSE(options.IsValid(&message));
+}
+#endif
+
+#if defined(CERES_NO_CXSPARSE)
+TEST(Solver, SparseNormalCholeskyNoCXSparse) {
+  Solver::Options options;
+  options.sparse_linear_algebra_library_type = CX_SPARSE;
+  options.linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+  string message;
+  EXPECT_FALSE(options.IsValid(&message));
+}
+
+TEST(Solver, SparseSchurNoCXSparse) {
+  Solver::Options options;
+  options.sparse_linear_algebra_library_type = CX_SPARSE;
+  options.linear_solver_type = SPARSE_SCHUR;
+  string message;
+  EXPECT_FALSE(options.IsValid(&message));
+}
+#endif
+
+#if defined(CERES_NO_ACCELERATE_SPARSE)
+TEST(Solver, SparseNormalCholeskyNoAccelerateSparse) {
+  Solver::Options options;
+  options.sparse_linear_algebra_library_type = ACCELERATE_SPARSE;
+  options.linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+  string message;
+  EXPECT_FALSE(options.IsValid(&message));
+}
+
+TEST(Solver, SparseSchurNoAccelerateSparse) {
+  Solver::Options options;
+  options.sparse_linear_algebra_library_type = ACCELERATE_SPARSE;
+  options.linear_solver_type = SPARSE_SCHUR;
+  string message;
+  EXPECT_FALSE(options.IsValid(&message));
+}
+#endif
+
+#if !defined(CERES_USE_EIGEN_SPARSE)
+TEST(Solver, SparseNormalCholeskyNoEigenSparse) {
+  Solver::Options options;
+  options.sparse_linear_algebra_library_type = EIGEN_SPARSE;
+  options.linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+  string message;
+  EXPECT_FALSE(options.IsValid(&message));
+}
+
+TEST(Solver, SparseSchurNoEigenSparse) {
+  Solver::Options options;
+  options.sparse_linear_algebra_library_type = EIGEN_SPARSE;
+  options.linear_solver_type = SPARSE_SCHUR;
+  string message;
+  EXPECT_FALSE(options.IsValid(&message));
+}
+#endif
+
+TEST(Solver, SparseNormalCholeskyNoSparseLibrary) {
+  Solver::Options options;
+  options.sparse_linear_algebra_library_type = NO_SPARSE;
+  options.linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+  string message;
+  EXPECT_FALSE(options.IsValid(&message));
+}
+
+TEST(Solver, SparseSchurNoSparseLibrary) {
+  Solver::Options options;
+  options.sparse_linear_algebra_library_type = NO_SPARSE;
+  options.linear_solver_type = SPARSE_SCHUR;
+  string message;
+  EXPECT_FALSE(options.IsValid(&message));
+}
+
+TEST(Solver, IterativeSchurWithClusterJacobiPreconditionerNoSparseLibrary) {
+  Solver::Options options;
+  options.sparse_linear_algebra_library_type = NO_SPARSE;
+  options.linear_solver_type = ITERATIVE_SCHUR;
+  // Requires SuiteSparse.
+  options.preconditioner_type = CLUSTER_JACOBI;
+  string message;
+  EXPECT_FALSE(options.IsValid(&message));
+}
+
+TEST(Solver, IterativeSchurWithClusterTridiagonalPreconditionerNoSparseLibrary) {
+  Solver::Options options;
+  options.sparse_linear_algebra_library_type = NO_SPARSE;
+  options.linear_solver_type = ITERATIVE_SCHUR;
+  // Requires SuiteSparse.
+  options.preconditioner_type = CLUSTER_TRIDIAGONAL;
+  string message;
+  EXPECT_FALSE(options.IsValid(&message));
+}
+
+TEST(Solver, IterativeLinearSolverForDogleg) {
+  Solver::Options options;
+  options.trust_region_strategy_type = DOGLEG;
+  string message;
+  options.linear_solver_type = ITERATIVE_SCHUR;
+  EXPECT_FALSE(options.IsValid(&message));
+
+  options.linear_solver_type = CGNR;
+  EXPECT_FALSE(options.IsValid(&message));
+}
+
+TEST(Solver, LinearSolverTypeNormalOperation) {
+  Solver::Options options;
+  options.linear_solver_type = DENSE_QR;
+
+  string message;
+  EXPECT_TRUE(options.IsValid(&message));
+
+  options.linear_solver_type = DENSE_NORMAL_CHOLESKY;
+  EXPECT_TRUE(options.IsValid(&message));
+
+  options.linear_solver_type = DENSE_SCHUR;
+  EXPECT_TRUE(options.IsValid(&message));
+
+  options.linear_solver_type = SPARSE_SCHUR;
+#if defined(CERES_NO_SUITESPARSE) &&            \
+    defined(CERES_NO_CXSPARSE) &&               \
+   !defined(CERES_USE_EIGEN_SPARSE)
+  EXPECT_FALSE(options.IsValid(&message));
+#else
+  EXPECT_TRUE(options.IsValid(&message));
+#endif
+
+  options.linear_solver_type = ITERATIVE_SCHUR;
+  EXPECT_TRUE(options.IsValid(&message));
+}
+
+TEST(Solver, CantMixEvaluationCallbackWithInnerIterations) {
+  Solver::Options options;
+  NoOpEvaluationCallback evaluation_callback;
+  string message;
+
+  // Can't combine them.
+  options.use_inner_iterations = true;
+  options.evaluation_callback = &evaluation_callback;
+  EXPECT_FALSE(options.IsValid(&message));
+
+  // Either or none is OK.
+  options.use_inner_iterations = false;
+  options.evaluation_callback = &evaluation_callback;
+  EXPECT_TRUE(options.IsValid(&message));
+
+  options.use_inner_iterations = true;
+  options.evaluation_callback = NULL;
+  EXPECT_TRUE(options.IsValid(&message));
+
+  options.use_inner_iterations = false;
+  options.evaluation_callback = NULL;
+  EXPECT_TRUE(options.IsValid(&message));
+}
+
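+// A cost function whose residuals are the constants
+// kNumResiduals * kNumResiduals + i, independent of its parameters; used
+// to check the fixed cost reported for constant problems.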
+template <int kNumResiduals, int... Ns>
+class DummyCostFunction : public SizedCostFunction<kNumResiduals, Ns...> {
+ public:
+  bool Evaluate(double const* const* parameters,
+                double* residuals,
+                double** jacobians) const {
+    for (int i = 0; i < kNumResiduals; ++i) {
+      residuals[i] = kNumResiduals * kNumResiduals + i;
+    }
+
+    return true;
+  }
+};
+
+TEST(Solver, FixedCostForConstantProblem) {
+  double x = 1.0;
+  Problem problem;
+  problem.AddResidualBlock(new DummyCostFunction<2, 1>(), NULL, &x);
+  problem.SetParameterBlockConstant(&x);
+  const double expected_cost = 41.0 / 2.0;  // 1/2 * ((4 + 0)^2 + (4 + 1)^2)
+  Solver::Options options;
+  Solver::Summary summary;
+  Solve(options, &problem, &summary);
+  EXPECT_TRUE(summary.IsSolutionUsable());
+  EXPECT_EQ(summary.fixed_cost, expected_cost);
+  EXPECT_EQ(summary.initial_cost, expected_cost);
+  EXPECT_EQ(summary.final_cost, expected_cost);
+  EXPECT_EQ(summary.iterations.size(), 0);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/solver_utils.cc b/internal/ceres/solver_utils.cc
new file mode 100644
index 0000000..177a928
--- /dev/null
+++ b/internal/ceres/solver_utils.cc
@@ -0,0 +1,92 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <string>
+
+#include "ceres/internal/config.h"
+
+#include "Eigen/Core"
+#include "ceres/internal/port.h"
+#include "ceres/solver_utils.h"
+#include "ceres/version.h"
+
+namespace ceres {
+namespace internal {
+
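+// The Eigen version as "world.major.minor", assembled at compile time.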
+#define CERES_EIGEN_VERSION                                          \
+  CERES_TO_STRING(EIGEN_WORLD_VERSION) "."                           \
+  CERES_TO_STRING(EIGEN_MAJOR_VERSION) "."                           \
+  CERES_TO_STRING(EIGEN_MINOR_VERSION)
+
+std::string VersionString() {
+  std::string value = std::string(CERES_VERSION_STRING);
+  value += "-eigen-(" + std::string(CERES_EIGEN_VERSION) + ")";
+
+#ifdef CERES_NO_LAPACK
+  value += "-no_lapack";
+#else
+  value += "-lapack";
+#endif
+
+#ifndef CERES_NO_SUITESPARSE
+  value += "-suitesparse-(" + std::string(CERES_SUITESPARSE_VERSION) + ")";
+#endif
+
+#ifndef CERES_NO_CXSPARSE
+  value += "-cxsparse-(" + std::string(CERES_CXSPARSE_VERSION) + ")";
+#endif
+
+#ifndef CERES_NO_ACCELERATE_SPARSE
+  value += "-acceleratesparse";
+#endif
+
+#ifdef CERES_USE_EIGEN_SPARSE
+  value += "-eigensparse";
+#endif
+
+#ifdef CERES_RESTRICT_SCHUR_SPECIALIZATION
+  value += "-no_schur_specializations";
+#endif
+
+#ifdef CERES_USE_OPENMP
+  value += "-openmp";
+#else
+  value += "-no_openmp";
+#endif
+
+#ifdef CERES_NO_CUSTOM_BLAS
+  value += "-no_custom_blas";
+#endif
+
+  return value;
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/solver_utils.h b/internal/ceres/solver_utils.h
new file mode 100644
index 0000000..85fbf37
--- /dev/null
+++ b/internal/ceres/solver_utils.h
@@ -0,0 +1,61 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_SOLVER_UTILS_H_
+#define CERES_INTERNAL_SOLVER_UTILS_H_
+
+#include <algorithm>
+#include <string>
+
+#include "ceres/iteration_callback.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+template <typename SummaryType>
+bool IsSolutionUsable(const SummaryType& summary) {
+  return (summary.termination_type == CONVERGENCE ||
+          summary.termination_type == NO_CONVERGENCE ||
+          summary.termination_type == USER_SUCCESS);
+}
+
+template <typename SummaryType>
+void SetSummaryFinalCost(SummaryType* summary) {
+  summary->final_cost = summary->initial_cost;
+  // We need the loop here, instead of just looking at the last iteration,
+  // because the minimizer may be taking non-monotonic steps.
+  for (int i = 0; i < summary->iterations.size(); ++i) {
+    const IterationSummary& iteration_summary = summary->iterations[i];
+    summary->final_cost = std::min(iteration_summary.cost, summary->final_cost);
+  }
+}
+
+std::string VersionString();
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_SOLVER_UTILS_H_
diff --git a/internal/ceres/sparse_cholesky.cc b/internal/ceres/sparse_cholesky.cc
new file mode 100644
index 0000000..3275cc0
--- /dev/null
+++ b/internal/ceres/sparse_cholesky.cc
@@ -0,0 +1,170 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/sparse_cholesky.h"
+
+#include "ceres/accelerate_sparse.h"
+#include "ceres/cxsparse.h"
+#include "ceres/eigensparse.h"
+#include "ceres/float_cxsparse.h"
+#include "ceres/float_suitesparse.h"
+#include "ceres/iterative_refiner.h"
+#include "ceres/suitesparse.h"
+
+namespace ceres {
+namespace internal {
+
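+// Instantiates the sparse Cholesky backend requested in the options,
+// optionally wrapping it in a RefinedSparseCholesky when iterative
+// refinement is requested.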
+std::unique_ptr<SparseCholesky> SparseCholesky::Create(
+    const LinearSolver::Options& options) {
+  const OrderingType ordering_type = options.use_postordering ? AMD : NATURAL;
+  std::unique_ptr<SparseCholesky> sparse_cholesky;
+
+  switch (options.sparse_linear_algebra_library_type) {
+    case SUITE_SPARSE:
+#ifndef CERES_NO_SUITESPARSE
+      if (options.use_mixed_precision_solves) {
+        sparse_cholesky = FloatSuiteSparseCholesky::Create(ordering_type);
+      } else {
+        sparse_cholesky = SuiteSparseCholesky::Create(ordering_type);
+      }
+      break;
+#else
+      LOG(FATAL) << "Ceres was compiled without support for SuiteSparse.";
+#endif
+
+    case EIGEN_SPARSE:
+#ifdef CERES_USE_EIGEN_SPARSE
+      if (options.use_mixed_precision_solves) {
+        sparse_cholesky = FloatEigenSparseCholesky::Create(ordering_type);
+      } else {
+        sparse_cholesky = EigenSparseCholesky::Create(ordering_type);
+      }
+      break;
+#else
+      LOG(FATAL) << "Ceres was compiled without support for "
+                 << "Eigen's sparse Cholesky factorization routines.";
+#endif
+
+    case CX_SPARSE:
+#ifndef CERES_NO_CXSPARSE
+      if (options.use_mixed_precision_solves) {
+        sparse_cholesky = FloatCXSparseCholesky::Create(ordering_type);
+      } else {
+        sparse_cholesky = CXSparseCholesky::Create(ordering_type);
+      }
+      break;
+#else
+      LOG(FATAL) << "Ceres was compiled without support for CXSparse.";
+#endif
+
+    case ACCELERATE_SPARSE:
+#ifndef CERES_NO_ACCELERATE_SPARSE
+      if (options.use_mixed_precision_solves) {
+        sparse_cholesky = AppleAccelerateCholesky<float>::Create(ordering_type);
+      } else {
+        sparse_cholesky = AppleAccelerateCholesky<double>::Create(ordering_type);
+      }
+      break;
+#else
+      LOG(FATAL) << "Ceres was compiled without support for Apple's Accelerate "
+                 << "framework solvers.";
+#endif
+
+    default:
+      LOG(FATAL) << "Unknown sparse linear algebra library type : "
+                 << SparseLinearAlgebraLibraryTypeToString(
+                        options.sparse_linear_algebra_library_type);
+  }
+
+  if (options.max_num_refinement_iterations > 0) {
+    std::unique_ptr<IterativeRefiner> refiner(
+        new IterativeRefiner(options.max_num_refinement_iterations));
+    sparse_cholesky = std::unique_ptr<SparseCholesky>(new RefinedSparseCholesky(
+        std::move(sparse_cholesky), std::move(refiner)));
+  }
+  return sparse_cholesky;
+}
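+
+// A minimal sketch of how the dispatch above might be driven; the option
+// values below are illustrative only. Mixed precision solves select the
+// float variants of the backends, and a positive
+// max_num_refinement_iterations wraps the factorization in a
+// RefinedSparseCholesky:
+//
+//   LinearSolver::Options options;
+//   options.sparse_linear_algebra_library_type = SUITE_SPARSE;
+//   options.use_postordering = true;            // AMD ordering.
+//   options.use_mixed_precision_solves = true;  // FloatSuiteSparseCholesky.
+//   options.max_num_refinement_iterations = 3;  // Adds iterative refinement.
+//   std::unique_ptr<SparseCholesky> cholesky = SparseCholesky::Create(options);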
+
+SparseCholesky::~SparseCholesky() {}
+
+LinearSolverTerminationType SparseCholesky::FactorAndSolve(
+    CompressedRowSparseMatrix* lhs,
+    const double* rhs,
+    double* solution,
+    std::string* message) {
+  LinearSolverTerminationType termination_type = Factorize(lhs, message);
+  if (termination_type == LINEAR_SOLVER_SUCCESS) {
+    termination_type = Solve(rhs, solution, message);
+  }
+  return termination_type;
+}
+
+CompressedRowSparseMatrix::StorageType StorageTypeForSparseLinearAlgebraLibrary(
+    SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type) {
+  if (sparse_linear_algebra_library_type == SUITE_SPARSE) {
+    return CompressedRowSparseMatrix::UPPER_TRIANGULAR;
+  }
+  return CompressedRowSparseMatrix::LOWER_TRIANGULAR;
+}
+
+RefinedSparseCholesky::RefinedSparseCholesky(
+    std::unique_ptr<SparseCholesky> sparse_cholesky,
+    std::unique_ptr<IterativeRefiner> iterative_refiner)
+    : sparse_cholesky_(std::move(sparse_cholesky)),
+      iterative_refiner_(std::move(iterative_refiner)) {}
+
+RefinedSparseCholesky::~RefinedSparseCholesky() {}
+
+CompressedRowSparseMatrix::StorageType RefinedSparseCholesky::StorageType()
+    const {
+  return sparse_cholesky_->StorageType();
+}
+
+LinearSolverTerminationType RefinedSparseCholesky::Factorize(
+    CompressedRowSparseMatrix* lhs, std::string* message) {
+  lhs_ = lhs;
+  return sparse_cholesky_->Factorize(lhs, message);
+}
+
+LinearSolverTerminationType RefinedSparseCholesky::Solve(const double* rhs,
+                                                         double* solution,
+                                                         std::string* message) {
+  CHECK(lhs_ != nullptr);
+  auto termination_type = sparse_cholesky_->Solve(rhs, solution, message);
+  if (termination_type != LINEAR_SOLVER_SUCCESS) {
+    return termination_type;
+  }
+
+  iterative_refiner_->Refine(*lhs_, rhs, sparse_cholesky_.get(), solution);
+  return LINEAR_SOLVER_SUCCESS;
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/sparse_cholesky.h b/internal/ceres/sparse_cholesky.h
new file mode 100644
index 0000000..bbe4237
--- /dev/null
+++ b/internal/ceres/sparse_cholesky.h
@@ -0,0 +1,138 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_SPARSE_CHOLESKY_H_
+#define CERES_INTERNAL_SPARSE_CHOLESKY_H_
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#include <memory>
+#include "ceres/linear_solver.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+// An interface that abstracts away the internal details of various
+// sparse linear algebra libraries and offers a simple API for solving
+// symmetric positive definite linear systems using a sparse Cholesky
+// factorization.
+//
+// Instances of SparseCholesky are expected to cache the symbolic
+// factorization of the linear system. They do this on the first call
+// to Factorize or FactorAndSolve. Subsequent calls to Factorize and
+// FactorAndSolve are expected to have the same sparsity structure.
+//
+// Example usage:
+//
+//  LinearSolver::Options options;
+//  options.sparse_linear_algebra_library_type = SUITE_SPARSE;
+//  options.use_postordering = true;  // Use the AMD fill-reducing ordering.
+//  std::unique_ptr<SparseCholesky> sparse_cholesky =
+//      SparseCholesky::Create(options);
+//
+//  CompressedRowSparseMatrix lhs = ...;
+//  std::string message;
+//  CHECK_EQ(sparse_cholesky->Factorize(&lhs, &message), LINEAR_SOLVER_SUCCESS);
+//  Vector rhs = ...;
+//  Vector solution = ...;
+//  CHECK_EQ(sparse_cholesky->Solve(rhs.data(), solution.data(), &message),
+//           LINEAR_SOLVER_SUCCESS);
+
+class SparseCholesky {
+ public:
+  static std::unique_ptr<SparseCholesky> Create(
+      const LinearSolver::Options& options);
+
+  virtual ~SparseCholesky();
+
+  // Due to the symmetry of the linear system, sparse linear algebra
+  // libraries only use one half of the input matrix. Whether it is
+  // the upper or the lower triangular part of the matrix depends on
+  // the library and the re-ordering strategy being used. This
+  // function tells the user the storage type expected of the input
+  // matrix for the sparse linear algebra library and reordering
+  // strategy used.
+  virtual CompressedRowSparseMatrix::StorageType StorageType() const = 0;
+
+  // Computes the numeric factorization of the given matrix. If this
+  // is the first call to Factorize, the symbolic factorization is
+  // computed and cached first, and the numeric factorization is then
+  // computed based on it.
+  //
+  // Subsequent calls to Factorize will use that symbolic
+  // factorization assuming that the sparsity of the matrix has
+  // remained constant.
+  virtual LinearSolverTerminationType Factorize(
+      CompressedRowSparseMatrix* lhs, std::string* message) = 0;
+
+  // Computes the solution to the equation
+  //
+  // lhs * solution = rhs
+  virtual LinearSolverTerminationType Solve(const double* rhs,
+                                            double* solution,
+                                            std::string* message) = 0;
+
+  // Convenience method which combines a call to Factorize and
+  // Solve. Solve is only called if Factorize returns
+  // LINEAR_SOLVER_SUCCESS.
+  virtual LinearSolverTerminationType FactorAndSolve(
+      CompressedRowSparseMatrix* lhs,
+      const double* rhs,
+      double* solution,
+      std::string* message);
+
+};
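+
+// Continuing the example above, a sketch of the caching behaviour: the same
+// SparseCholesky instance can be reused as long as the sparsity pattern of
+// lhs does not change. UpdateValues() here is a hypothetical, problem
+// specific routine that changes only the numeric values of lhs and rhs:
+//
+//   for (int i = 0; i < num_iterations; ++i) {
+//     UpdateValues(&lhs, &rhs);
+//     CHECK_EQ(sparse_cholesky->FactorAndSolve(&lhs, rhs.data(),
+//                                              solution.data(), &message),
+//              LINEAR_SOLVER_SUCCESS);
+//   }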
+
+class IterativeRefiner;
+
+// Computes an initial solution using the given instance of
+// SparseCholesky, and then refines it using the IterativeRefiner.
+class RefinedSparseCholesky : public SparseCholesky {
+ public:
+  RefinedSparseCholesky(std::unique_ptr<SparseCholesky> sparse_cholesky,
+                        std::unique_ptr<IterativeRefiner> iterative_refiner);
+  virtual ~RefinedSparseCholesky();
+
+  virtual CompressedRowSparseMatrix::StorageType StorageType() const;
+  virtual LinearSolverTerminationType Factorize(
+      CompressedRowSparseMatrix* lhs, std::string* message);
+  virtual LinearSolverTerminationType Solve(const double* rhs,
+                                            double* solution,
+                                            std::string* message);
+
+ private:
+  std::unique_ptr<SparseCholesky> sparse_cholesky_;
+  std::unique_ptr<IterativeRefiner> iterative_refiner_;
+  CompressedRowSparseMatrix* lhs_ = nullptr;
+};
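+
+// A sketch of assembling the composition by hand (SparseCholesky::Create does
+// this automatically when options.max_num_refinement_iterations > 0). Here
+// `inner` is assumed to be a SparseCholesky for one of the backends,
+// kNumRefinementIterations is the desired number of refinement steps, and
+// "ceres/iterative_refiner.h" has been included:
+//
+//   std::unique_ptr<IterativeRefiner> refiner(
+//       new IterativeRefiner(kNumRefinementIterations));
+//   RefinedSparseCholesky refined(std::move(inner), std::move(refiner));
+//   // Factorize must succeed before Solve, which then refines the solution.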
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_SPARSE_CHOLESKY_H_
diff --git a/internal/ceres/sparse_cholesky_test.cc b/internal/ceres/sparse_cholesky_test.cc
new file mode 100644
index 0000000..4c1f6d8
--- /dev/null
+++ b/internal/ceres/sparse_cholesky_test.cc
@@ -0,0 +1,346 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/sparse_cholesky.h"
+
+#include <algorithm>
+#include <limits>
+#include <memory>
+#include <numeric>
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include "Eigen/Dense"
+#include "Eigen/SparseCore"
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/inner_product_computer.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/iterative_refiner.h"
+#include "ceres/random.h"
+#include "glog/logging.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+BlockSparseMatrix* CreateRandomFullRankMatrix(const int num_col_blocks,
+                                              const int min_col_block_size,
+                                              const int max_col_block_size,
+                                              const double block_density) {
+  // Create a random matrix
+  BlockSparseMatrix::RandomMatrixOptions options;
+  options.num_col_blocks = num_col_blocks;
+  options.min_col_block_size = min_col_block_size;
+  options.max_col_block_size = max_col_block_size;
+
+  options.num_row_blocks = 2 * num_col_blocks;
+  options.min_row_block_size = 1;
+  options.max_row_block_size = max_col_block_size;
+  options.block_density = block_density;
+  std::unique_ptr<BlockSparseMatrix> random_matrix(
+      BlockSparseMatrix::CreateRandomMatrix(options));
+
+  // Add a diagonal block sparse matrix to make it full rank.
+  Vector diagonal = Vector::Ones(random_matrix->num_cols());
+  std::unique_ptr<BlockSparseMatrix> block_diagonal(
+      BlockSparseMatrix::CreateDiagonalMatrix(
+          diagonal.data(), random_matrix->block_structure()->cols));
+  random_matrix->AppendRows(*block_diagonal);
+  return random_matrix.release();
+}
+
+bool ComputeExpectedSolution(const CompressedRowSparseMatrix& lhs,
+                             const Vector& rhs,
+                             Vector* solution) {
+  Matrix eigen_lhs;
+  lhs.ToDenseMatrix(&eigen_lhs);
+  if (lhs.storage_type() == CompressedRowSparseMatrix::UPPER_TRIANGULAR) {
+    Eigen::LLT<Matrix, Eigen::Upper> llt =
+        eigen_lhs.selfadjointView<Eigen::Upper>().llt();
+    if (llt.info() != Eigen::Success) {
+      return false;
+    }
+    *solution = llt.solve(rhs);
+    return (llt.info() == Eigen::Success);
+  }
+
+  Eigen::LLT<Matrix, Eigen::Lower> llt =
+      eigen_lhs.selfadjointView<Eigen::Lower>().llt();
+  if (llt.info() != Eigen::Success) {
+    return false;
+  }
+  *solution = llt.solve(rhs);
+  return (llt.info() == Eigen::Success);
+}
+
+void SparseCholeskySolverUnitTest(
+    const SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
+    const OrderingType ordering_type,
+    const bool use_block_structure,
+    const int num_blocks,
+    const int min_block_size,
+    const int max_block_size,
+    const double block_density) {
+  LinearSolver::Options sparse_cholesky_options;
+  sparse_cholesky_options.sparse_linear_algebra_library_type =
+      sparse_linear_algebra_library_type;
+  sparse_cholesky_options.use_postordering = (ordering_type == AMD);
+  std::unique_ptr<SparseCholesky> sparse_cholesky = SparseCholesky::Create(
+      sparse_cholesky_options);
+  const CompressedRowSparseMatrix::StorageType storage_type =
+      sparse_cholesky->StorageType();
+
+  std::unique_ptr<BlockSparseMatrix> m(CreateRandomFullRankMatrix(
+      num_blocks, min_block_size, max_block_size, block_density));
+  std::unique_ptr<InnerProductComputer> inner_product_computer(
+      InnerProductComputer::Create(*m, storage_type));
+  inner_product_computer->Compute();
+  CompressedRowSparseMatrix* lhs = inner_product_computer->mutable_result();
+
+  if (!use_block_structure) {
+    lhs->mutable_row_blocks()->clear();
+    lhs->mutable_col_blocks()->clear();
+  }
+
+  Vector rhs = Vector::Random(lhs->num_rows());
+  Vector expected(lhs->num_rows());
+  Vector actual(lhs->num_rows());
+
+  EXPECT_TRUE(ComputeExpectedSolution(*lhs, rhs, &expected));
+  std::string message;
+  EXPECT_EQ(sparse_cholesky->FactorAndSolve(
+                lhs, rhs.data(), actual.data(), &message),
+            LINEAR_SOLVER_SUCCESS);
+  Matrix eigen_lhs;
+  lhs->ToDenseMatrix(&eigen_lhs);
+  EXPECT_NEAR((actual - expected).norm() / actual.norm(),
+              0.0,
+              std::numeric_limits<double>::epsilon() * 20)
+      << "\n"
+      << eigen_lhs;
+}
+
+typedef ::testing::tuple<SparseLinearAlgebraLibraryType, OrderingType, bool>
+    Param;
+
+std::string ParamInfoToString(testing::TestParamInfo<Param> info) {
+  Param param = info.param;
+  std::stringstream ss;
+  ss << SparseLinearAlgebraLibraryTypeToString(::testing::get<0>(param)) << "_"
+     << (::testing::get<1>(param) == AMD ? "AMD" : "NATURAL") << "_"
+     << (::testing::get<2>(param) ? "UseBlockStructure" : "NoBlockStructure");
+  return ss.str();
+}
+
+class SparseCholeskyTest : public ::testing::TestWithParam<Param> {};
+
+TEST_P(SparseCholeskyTest, FactorAndSolve) {
+  SetRandomState(2982);
+  const int kMinNumBlocks = 1;
+  const int kMaxNumBlocks = 10;
+  const int kNumTrials = 10;
+  const int kMinBlockSize = 1;
+  const int kMaxBlockSize = 5;
+
+  for (int num_blocks = kMinNumBlocks; num_blocks < kMaxNumBlocks;
+       ++num_blocks) {
+    for (int trial = 0; trial < kNumTrials; ++trial) {
+      const double block_density = std::max(0.1, RandDouble());
+      Param param = GetParam();
+      SparseCholeskySolverUnitTest(::testing::get<0>(param),
+                                   ::testing::get<1>(param),
+                                   ::testing::get<2>(param),
+                                   num_blocks,
+                                   kMinBlockSize,
+                                   kMaxBlockSize,
+                                   block_density);
+    }
+  }
+}
+
+#ifndef CERES_NO_SUITESPARSE
+INSTANTIATE_TEST_CASE_P(SuiteSparseCholesky,
+                        SparseCholeskyTest,
+                        ::testing::Combine(::testing::Values(SUITE_SPARSE),
+                                           ::testing::Values(AMD, NATURAL),
+                                           ::testing::Values(true, false)),
+                        ParamInfoToString);
+#endif
+
+#ifndef CERES_NO_CXSPARSE
+INSTANTIATE_TEST_CASE_P(CXSparseCholesky,
+                        SparseCholeskyTest,
+                        ::testing::Combine(::testing::Values(CX_SPARSE),
+                                           ::testing::Values(AMD, NATURAL),
+                                           ::testing::Values(true, false)),
+                        ParamInfoToString);
+#endif
+
+#ifndef CERES_NO_ACCELERATE_SPARSE
+INSTANTIATE_TEST_CASE_P(AccelerateSparseCholesky,
+                        SparseCholeskyTest,
+                        ::testing::Combine(::testing::Values(ACCELERATE_SPARSE),
+                                           ::testing::Values(AMD, NATURAL),
+                                           ::testing::Values(true, false)),
+                        ParamInfoToString);
+
+INSTANTIATE_TEST_CASE_P(AccelerateSparseCholeskySingle,
+                        SparseCholeskyTest,
+                        ::testing::Combine(::testing::Values(ACCELERATE_SPARSE),
+                                           ::testing::Values(AMD, NATURAL),
+                                           ::testing::Values(true, false)),
+                        ParamInfoToString);
+#endif
+
+#ifdef CERES_USE_EIGEN_SPARSE
+INSTANTIATE_TEST_CASE_P(EigenSparseCholesky,
+                        SparseCholeskyTest,
+                        ::testing::Combine(::testing::Values(EIGEN_SPARSE),
+                                           ::testing::Values(AMD, NATURAL),
+                                           ::testing::Values(true, false)),
+                        ParamInfoToString);
+
+INSTANTIATE_TEST_CASE_P(EigenSparseCholeskySingle,
+                        SparseCholeskyTest,
+                        ::testing::Combine(::testing::Values(EIGEN_SPARSE),
+                                           ::testing::Values(AMD, NATURAL),
+                                           ::testing::Values(true, false)),
+                        ParamInfoToString);
+#endif
+
+class MockSparseCholesky : public SparseCholesky {
+ public:
+  MOCK_CONST_METHOD0(StorageType, CompressedRowSparseMatrix::StorageType());
+  MOCK_METHOD2(Factorize,
+               LinearSolverTerminationType(CompressedRowSparseMatrix* lhs,
+                                           std::string* message));
+  MOCK_METHOD3(Solve,
+               LinearSolverTerminationType(const double* rhs,
+                                           double* solution,
+                                           std::string* message));
+};
+
+class MockIterativeRefiner : public IterativeRefiner {
+ public:
+  MockIterativeRefiner() : IterativeRefiner(1) {}
+  MOCK_METHOD4(Refine,
+               void (const SparseMatrix& lhs,
+                     const double* rhs,
+                     SparseCholesky* sparse_cholesky,
+                     double* solution));
+};
+
+
+using testing::_;
+using testing::Return;
+
+TEST(RefinedSparseCholesky, StorageType) {
+  MockSparseCholesky* mock_sparse_cholesky = new MockSparseCholesky;
+  MockIterativeRefiner* mock_iterative_refiner = new MockIterativeRefiner;
+  EXPECT_CALL(*mock_sparse_cholesky, StorageType())
+      .Times(1)
+      .WillRepeatedly(Return(CompressedRowSparseMatrix::UPPER_TRIANGULAR));
+  EXPECT_CALL(*mock_iterative_refiner, Refine(_, _, _, _))
+      .Times(0);
+  std::unique_ptr<SparseCholesky> sparse_cholesky(mock_sparse_cholesky);
+  std::unique_ptr<IterativeRefiner> iterative_refiner(mock_iterative_refiner);
+  RefinedSparseCholesky refined_sparse_cholesky(std::move(sparse_cholesky),
+                                                std::move(iterative_refiner));
+  EXPECT_EQ(refined_sparse_cholesky.StorageType(),
+            CompressedRowSparseMatrix::UPPER_TRIANGULAR);
+}
+
+TEST(RefinedSparseCholesky, Factorize) {
+  MockSparseCholesky* mock_sparse_cholesky = new MockSparseCholesky;
+  MockIterativeRefiner* mock_iterative_refiner = new MockIterativeRefiner;
+  EXPECT_CALL(*mock_sparse_cholesky, Factorize(_, _))
+      .Times(1)
+      .WillRepeatedly(Return(LINEAR_SOLVER_SUCCESS));
+  EXPECT_CALL(*mock_iterative_refiner, Refine(_, _, _, _))
+      .Times(0);
+  std::unique_ptr<SparseCholesky> sparse_cholesky(mock_sparse_cholesky);
+  std::unique_ptr<IterativeRefiner> iterative_refiner(mock_iterative_refiner);
+  RefinedSparseCholesky refined_sparse_cholesky(std::move(sparse_cholesky),
+                                                std::move(iterative_refiner));
+  CompressedRowSparseMatrix m(1, 1, 1);
+  std::string message;
+  EXPECT_EQ(refined_sparse_cholesky.Factorize(&m, &message),
+            LINEAR_SOLVER_SUCCESS);
+}
+
+TEST(RefinedSparseCholesky, FactorAndSolveWithUnsuccessfulFactorization) {
+  MockSparseCholesky* mock_sparse_cholesky = new MockSparseCholesky;
+  MockIterativeRefiner* mock_iterative_refiner = new MockIterativeRefiner;
+  EXPECT_CALL(*mock_sparse_cholesky, Factorize(_, _))
+      .Times(1)
+      .WillRepeatedly(Return(LINEAR_SOLVER_FAILURE));
+  EXPECT_CALL(*mock_sparse_cholesky, Solve(_, _, _))
+      .Times(0);
+  EXPECT_CALL(*mock_iterative_refiner, Refine(_, _, _, _))
+      .Times(0);
+  std::unique_ptr<SparseCholesky> sparse_cholesky(mock_sparse_cholesky);
+  std::unique_ptr<IterativeRefiner> iterative_refiner(mock_iterative_refiner);
+  RefinedSparseCholesky refined_sparse_cholesky(std::move(sparse_cholesky),
+                                                std::move(iterative_refiner));
+  CompressedRowSparseMatrix m(1, 1, 1);
+  std::string message;
+  double rhs;
+  double solution;
+  EXPECT_EQ(refined_sparse_cholesky.FactorAndSolve(&m, &rhs, &solution, &message),
+            LINEAR_SOLVER_FAILURE);
+}
+
+TEST(RefinedSparseCholesky, FactorAndSolveWithSuccess) {
+  MockSparseCholesky* mock_sparse_cholesky = new MockSparseCholesky;
+  std::unique_ptr<MockIterativeRefiner> mock_iterative_refiner(new MockIterativeRefiner);
+  EXPECT_CALL(*mock_sparse_cholesky, Factorize(_, _))
+      .Times(1)
+      .WillRepeatedly(Return(LINEAR_SOLVER_SUCCESS));
+  EXPECT_CALL(*mock_sparse_cholesky, Solve(_, _, _))
+      .Times(1)
+      .WillRepeatedly(Return(LINEAR_SOLVER_SUCCESS));
+  EXPECT_CALL(*mock_iterative_refiner, Refine(_, _, _, _))
+      .Times(1);
+
+  std::unique_ptr<SparseCholesky> sparse_cholesky(mock_sparse_cholesky);
+  std::unique_ptr<IterativeRefiner> iterative_refiner(std::move(mock_iterative_refiner));
+  RefinedSparseCholesky refined_sparse_cholesky(std::move(sparse_cholesky),
+                                                std::move(iterative_refiner));
+  CompressedRowSparseMatrix m(1, 1, 1);
+  std::string message;
+  double rhs;
+  double solution;
+  EXPECT_EQ(refined_sparse_cholesky.FactorAndSolve(&m, &rhs, &solution, &message),
+            LINEAR_SOLVER_SUCCESS);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/sparse_matrix.cc b/internal/ceres/sparse_matrix.cc
new file mode 100644
index 0000000..f95ff32
--- /dev/null
+++ b/internal/ceres/sparse_matrix.cc
@@ -0,0 +1,40 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/sparse_matrix.h"
+
+namespace ceres {
+namespace internal {
+
+SparseMatrix::~SparseMatrix() {
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/sparse_matrix.h b/internal/ceres/sparse_matrix.h
new file mode 100644
index 0000000..074d847
--- /dev/null
+++ b/internal/ceres/sparse_matrix.h
@@ -0,0 +1,107 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Interface definition for sparse matrices.
+
+#ifndef CERES_INTERNAL_SPARSE_MATRIX_H_
+#define CERES_INTERNAL_SPARSE_MATRIX_H_
+
+#include <cstdio>
+#include "ceres/linear_operator.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+// This class defines the interface for storing and manipulating
+// sparse matrices. The key property that differentiates different
+// sparse matrices is how they are organized in memory and how the
+// information about the sparsity structure of the matrix is
+// stored. This has significant implications for linear solvers
+// operating on these matrices.
+//
+// To deal with the different kinds of layouts, we assume that a
+// sparse matrix has a two-part representation: a values array that
+// stores the entries of the sparse matrix, and some sort of layout
+// object that describes the sparsity structure and the layout of the
+// values array. For example, in the case of TripletSparseMatrix this
+// information is carried in the rows and cols arrays, and for
+// BlockSparseMatrix it is carried in the CompressedRowBlockStructure
+// object.
+//
+// This interface deliberately does not expose the structure of the
+// sparse matrix, as that is highly matrix-type dependent and we have
+// so far been unable to come up with an efficient high level
+// interface that spans multiple sparse matrix types.
+class SparseMatrix : public LinearOperator {
+ public:
+  virtual ~SparseMatrix();
+
+  // y += Ax;
+  virtual void RightMultiply(const double* x, double* y) const = 0;
+  // y += A'x;
+  virtual void LeftMultiply(const double* x, double* y) const = 0;
+
+  // In MATLAB notation sum(A.*A, 1)
+  virtual void SquaredColumnNorm(double* x) const = 0;
+  // A = A * diag(scale)
+  virtual void ScaleColumns(const double* scale) = 0;
+
+  // A = 0. A->num_nonzeros() == 0 is true after this call. The
+  // sparsity pattern is preserved.
+  virtual void SetZero() = 0;
+
+  // Resize and populate dense_matrix with a dense version of the
+  // sparse matrix.
+  virtual void ToDenseMatrix(Matrix* dense_matrix) const = 0;
+
+  // Write out the matrix as a sequence of (i,j,s) triplets. This
+  // format is useful for loading the matrix into MATLAB/octave as a
+  // sparse matrix.
+  virtual void ToTextFile(FILE* file) const = 0;
+
+  // Accessors for the values array that stores the entries of the
+  // sparse matrix. The exact interpretation of the values of this
+  // array depends on the particular kind of SparseMatrix being
+  // accessed.
+  virtual double* mutable_values() = 0;
+  virtual const double* values() const = 0;
+
+  virtual int num_rows() const = 0;
+  virtual int num_cols() const = 0;
+  virtual int num_nonzeros() const = 0;
+};
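+
+// A sketch of one way this interface is used: applying the normal equations
+// operator z = A' (A x) using only the matrix-vector products declared above,
+// without ever forming A'A. Here A stands for any concrete SparseMatrix and
+// the vector x is assumed to have A.num_cols() entries:
+//
+//   Vector y = Vector::Zero(A.num_rows());
+//   Vector z = Vector::Zero(A.num_cols());
+//   A.RightMultiply(x.data(), y.data());  // y += A x
+//   A.LeftMultiply(y.data(), z.data());   // z += A' y == A' A x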
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_SPARSE_MATRIX_H_
diff --git a/internal/ceres/sparse_normal_cholesky_solver.cc b/internal/ceres/sparse_normal_cholesky_solver.cc
new file mode 100644
index 0000000..0f2e589
--- /dev/null
+++ b/internal/ceres/sparse_normal_cholesky_solver.cc
@@ -0,0 +1,114 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/sparse_normal_cholesky_solver.h"
+
+#include <algorithm>
+#include <cstring>
+#include <ctime>
+#include <memory>
+
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/inner_product_computer.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/iterative_refiner.h"
+#include "ceres/linear_solver.h"
+#include "ceres/sparse_cholesky.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/types.h"
+#include "ceres/wall_time.h"
+
+namespace ceres {
+namespace internal {
+
+SparseNormalCholeskySolver::SparseNormalCholeskySolver(
+    const LinearSolver::Options& options)
+    : options_(options) {
+  sparse_cholesky_ = SparseCholesky::Create(options);
+}
+
+SparseNormalCholeskySolver::~SparseNormalCholeskySolver() {}
+
+LinearSolver::Summary SparseNormalCholeskySolver::SolveImpl(
+    BlockSparseMatrix* A,
+    const double* b,
+    const LinearSolver::PerSolveOptions& per_solve_options,
+    double* x) {
+  EventLogger event_logger("SparseNormalCholeskySolver::Solve");
+  LinearSolver::Summary summary;
+  summary.num_iterations = 1;
+  summary.termination_type = LINEAR_SOLVER_SUCCESS;
+  summary.message = "Success.";
+
+  const int num_cols = A->num_cols();
+  VectorRef xref(x, num_cols);
+  xref.setZero();
+  rhs_.resize(num_cols);
+  rhs_.setZero();
+  A->LeftMultiply(b, rhs_.data());
+  event_logger.AddEvent("Compute RHS");
+
+  if (per_solve_options.D != NULL) {
+    // Temporarily append a diagonal block to the A matrix, but undo
+    // it before returning the matrix to the user.
+    std::unique_ptr<BlockSparseMatrix> regularizer;
+    regularizer.reset(BlockSparseMatrix::CreateDiagonalMatrix(
+        per_solve_options.D, A->block_structure()->cols));
+    event_logger.AddEvent("Diagonal");
+    A->AppendRows(*regularizer);
+    event_logger.AddEvent("Append");
+  }
+  event_logger.AddEvent("Append Rows");
+
+  if (inner_product_computer_.get() == NULL) {
+    inner_product_computer_.reset(
+        InnerProductComputer::Create(*A, sparse_cholesky_->StorageType()));
+
+    event_logger.AddEvent("InnerProductComputer::Create");
+  }
+
+  inner_product_computer_->Compute();
+  event_logger.AddEvent("InnerProductComputer::Compute");
+
+  if (per_solve_options.D != NULL) {
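+    // Remove the diagonal row blocks appended above, so that A is returned
+    // to the caller in its original form.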
+    A->DeleteRowBlocks(A->block_structure()->cols.size());
+  }
+
+  summary.termination_type = sparse_cholesky_->FactorAndSolve(
+      inner_product_computer_->mutable_result(),
+      rhs_.data(),
+      x,
+      &summary.message);
+  event_logger.AddEvent("SparseCholesky::FactorAndSolve");
+  return summary;
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/sparse_normal_cholesky_solver.h b/internal/ceres/sparse_normal_cholesky_solver.h
new file mode 100644
index 0000000..95d5436
--- /dev/null
+++ b/internal/ceres/sparse_normal_cholesky_solver.h
@@ -0,0 +1,76 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// A solver for sparse linear least squares problems based on solving
+// the normal equations via a sparse Cholesky factorization.
+
+#ifndef CERES_INTERNAL_SPARSE_NORMAL_CHOLESKY_SOLVER_H_
+#define CERES_INTERNAL_SPARSE_NORMAL_CHOLESKY_SOLVER_H_
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#include <vector>
+#include "ceres/linear_solver.h"
+
+namespace ceres {
+namespace internal {
+
+class CompressedRowSparseMatrix;
+class InnerProductComputer;
+class SparseCholesky;
+
+// Solves the normal equations (A'A + D'D) x = A'b, using the sparse
+// linear algebra library of the user's choice.
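+//
+// The D'D term is handled by temporarily appending D to A as a set of
+// diagonal row blocks: for the stacked matrix [A; D] we have
+// [A; D]' [A; D] = A'A + D'D, while the right hand side A'b is computed
+// from the original A before the rows are appended.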
+class SparseNormalCholeskySolver : public BlockSparseMatrixSolver {
+ public:
+  explicit SparseNormalCholeskySolver(const LinearSolver::Options& options);
+  SparseNormalCholeskySolver(const SparseNormalCholeskySolver&) = delete;
+  void operator=(const SparseNormalCholeskySolver&) = delete;
+
+  virtual ~SparseNormalCholeskySolver();
+
+ private:
+  virtual LinearSolver::Summary SolveImpl(
+      BlockSparseMatrix* A,
+      const double* b,
+      const LinearSolver::PerSolveOptions& options,
+      double* x);
+
+  const LinearSolver::Options options_;
+  Vector rhs_;
+  std::unique_ptr<SparseCholesky> sparse_cholesky_;
+  std::unique_ptr<InnerProductComputer> inner_product_computer_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_SPARSE_NORMAL_CHOLESKY_SOLVER_H_
diff --git a/internal/ceres/sparse_normal_cholesky_solver_test.cc b/internal/ceres/sparse_normal_cholesky_solver_test.cc
new file mode 100644
index 0000000..c4b4a0b
--- /dev/null
+++ b/internal/ceres/sparse_normal_cholesky_solver_test.cc
@@ -0,0 +1,206 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <memory>
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/casts.h"
+#include "ceres/context_impl.h"
+#include "ceres/linear_least_squares_problems.h"
+#include "ceres/linear_solver.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+#include "Eigen/Cholesky"
+
+namespace ceres {
+namespace internal {
+
+// TODO(sameeragarwal): These tests need to be rewritten, since
+// SparseNormalCholeskySolver is a composition of two classes now,
+// InnerProductComputer and SparseCholesky.
+//
+// So the test should exercise the composition, rather than the
+// numerics of the solver, which are well covered by tests for those
+// classes.
+class SparseNormalCholeskySolverTest : public ::testing::Test {
+ protected:
+  virtual void SetUp() {
+    std::unique_ptr<LinearLeastSquaresProblem> problem(
+        CreateLinearLeastSquaresProblemFromId(2));
+
+    CHECK(problem != nullptr);
+    A_.reset(down_cast<BlockSparseMatrix*>(problem->A.release()));
+    b_.reset(problem->b.release());
+    D_.reset(problem->D.release());
+  }
+
+  void TestSolver(const LinearSolver::Options& options, double* D) {
+    Matrix dense_A;
+    A_->ToDenseMatrix(&dense_A);
+    Matrix lhs = dense_A.transpose() * dense_A;
+    if (D != NULL) {
+      lhs += (ConstVectorRef(D, A_->num_cols()).array() *
+              ConstVectorRef(D, A_->num_cols()).array())
+                 .matrix()
+                 .asDiagonal();
+    }
+
+    Vector rhs(A_->num_cols());
+    rhs.setZero();
+    A_->LeftMultiply(b_.get(), rhs.data());
+    Vector expected_solution = lhs.llt().solve(rhs);
+
+    std::unique_ptr<LinearSolver> solver(LinearSolver::Create(options));
+    LinearSolver::PerSolveOptions per_solve_options;
+    per_solve_options.D = D;
+    Vector actual_solution(A_->num_cols());
+    LinearSolver::Summary summary;
+    summary = solver->Solve(
+        A_.get(), b_.get(), per_solve_options, actual_solution.data());
+
+    EXPECT_EQ(summary.termination_type, LINEAR_SOLVER_SUCCESS);
+
+    for (int i = 0; i < A_->num_cols(); ++i) {
+      EXPECT_NEAR(expected_solution(i), actual_solution(i), 1e-8)
+          << "\nExpected: " << expected_solution.transpose()
+          << "\nActual: " << actual_solution.transpose();
+    }
+  }
+
+  void TestSolver(const LinearSolver::Options& options) {
+    TestSolver(options, NULL);
+    TestSolver(options, D_.get());
+  }
+
+  std::unique_ptr<BlockSparseMatrix> A_;
+  std::unique_ptr<double[]> b_;
+  std::unique_ptr<double[]> D_;
+};
+
+#ifndef CERES_NO_SUITESPARSE
+TEST_F(SparseNormalCholeskySolverTest,
+       SparseNormalCholeskyUsingSuiteSparsePreOrdering) {
+  LinearSolver::Options options;
+  options.sparse_linear_algebra_library_type = SUITE_SPARSE;
+  options.type = SPARSE_NORMAL_CHOLESKY;
+  options.use_postordering = false;
+  ContextImpl context;
+  options.context = &context;
+  TestSolver(options);
+}
+
+TEST_F(SparseNormalCholeskySolverTest,
+       SparseNormalCholeskyUsingSuiteSparsePostOrdering) {
+  LinearSolver::Options options;
+  options.sparse_linear_algebra_library_type = SUITE_SPARSE;
+  options.type = SPARSE_NORMAL_CHOLESKY;
+  options.use_postordering = true;
+  ContextImpl context;
+  options.context = &context;
+  TestSolver(options);
+}
+#endif
+
+#ifndef CERES_NO_CXSPARSE
+TEST_F(SparseNormalCholeskySolverTest,
+       SparseNormalCholeskyUsingCXSparsePreOrdering) {
+  LinearSolver::Options options;
+  options.sparse_linear_algebra_library_type = CX_SPARSE;
+  options.type = SPARSE_NORMAL_CHOLESKY;
+  options.use_postordering = false;
+  ContextImpl context;
+  options.context = &context;
+  TestSolver(options);
+}
+
+TEST_F(SparseNormalCholeskySolverTest,
+       SparseNormalCholeskyUsingCXSparsePostOrdering) {
+  LinearSolver::Options options;
+  options.sparse_linear_algebra_library_type = CX_SPARSE;
+  options.type = SPARSE_NORMAL_CHOLESKY;
+  options.use_postordering = true;
+  ContextImpl context;
+  options.context = &context;
+  TestSolver(options);
+}
+#endif
+
+#ifndef CERES_NO_ACCELERATE_SPARSE
+TEST_F(SparseNormalCholeskySolverTest,
+       SparseNormalCholeskyUsingAccelerateSparsePreOrdering) {
+  LinearSolver::Options options;
+  options.sparse_linear_algebra_library_type = ACCELERATE_SPARSE;
+  options.type = SPARSE_NORMAL_CHOLESKY;
+  options.use_postordering = false;
+  ContextImpl context;
+  options.context = &context;
+  TestSolver(options);
+}
+
+TEST_F(SparseNormalCholeskySolverTest,
+       SparseNormalCholeskyUsingAccelerateSparsePostOrdering) {
+  LinearSolver::Options options;
+  options.sparse_linear_algebra_library_type = ACCELERATE_SPARSE;
+  options.type = SPARSE_NORMAL_CHOLESKY;
+  options.use_postordering = true;
+  ContextImpl context;
+  options.context = &context;
+  TestSolver(options);
+}
+#endif
+
+#ifdef CERES_USE_EIGEN_SPARSE
+TEST_F(SparseNormalCholeskySolverTest,
+       SparseNormalCholeskyUsingEigenPreOrdering) {
+  LinearSolver::Options options;
+  options.sparse_linear_algebra_library_type = EIGEN_SPARSE;
+  options.type = SPARSE_NORMAL_CHOLESKY;
+  options.use_postordering = false;
+  ContextImpl context;
+  options.context = &context;
+  TestSolver(options);
+}
+
+TEST_F(SparseNormalCholeskySolverTest,
+       SparseNormalCholeskyUsingEigenPostOrdering) {
+  LinearSolver::Options options;
+  options.sparse_linear_algebra_library_type = EIGEN_SPARSE;
+  options.type = SPARSE_NORMAL_CHOLESKY;
+  options.use_postordering = true;
+  ContextImpl context;
+  options.context = &context;
+  TestSolver(options);
+}
+#endif  // CERES_USE_EIGEN_SPARSE
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/split.cc b/internal/ceres/split.cc
new file mode 100644
index 0000000..3a09e86
--- /dev/null
+++ b/internal/ceres/split.cc
@@ -0,0 +1,123 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include "ceres/split.h"
+
+#include <iterator>
+#include <string>
+#include <vector>
+
+#include "ceres/internal/port.h"
+
+namespace ceres {
+namespace internal {
+
+using std::string;
+using std::vector;
+
+// If we know how much to allocate for a vector of strings, we can allocate the
+// vector<string> only once and directly to the right size. This saves between
+// 33% and 66% of the memory needed for the result, and runs faster in the
+// microbenchmarks.
+//
+// The reserve is only implemented for the single character delimiter case.
+//
+// The counting logic is copied from SplitStringToIteratorUsing. A dedicated
+// counting iterator used with the existing template function would also work,
+// but this version is clearer and more likely to be optimized into reasonable
+// code.
+static int CalculateReserveForVector(const string& full, const char* delim) {
+  int count = 0;
+  if (delim[0] != '\0' && delim[1] == '\0') {
+    // Optimize the common case where delim is a single character.
+    char c = delim[0];
+    const char* p = full.data();
+    const char* end = p + full.size();
+    while (p != end) {
+      if (*p == c) {  // This could be optimized with hasless(v,1) trick.
+        ++p;
+      } else {
+        while (++p != end && *p != c) {
+        // Skip to the next occurrence of the delimiter.
+        }
+        ++count;
+      }
+    }
+  }
+  return count;
+}
+
+template <typename StringType, typename ITR>
+static inline
+void SplitStringToIteratorUsing(const StringType& full,
+                                const char* delim,
+                                ITR& result) {
+  // Optimize the common case where delim is a single character.
+  if (delim[0] != '\0' && delim[1] == '\0') {
+    char c = delim[0];
+    const char* p = full.data();
+    const char* end = p + full.size();
+    while (p != end) {
+      if (*p == c) {
+        ++p;
+      } else {
+        const char* start = p;
+        while (++p != end && *p != c) {
+        // Skip to the next occurrence of the delimiter.
+        }
+        *result++ = StringType(start, p - start);
+      }
+    }
+    return;
+  }
+
+  string::size_type begin_index, end_index;
+  begin_index = full.find_first_not_of(delim);
+  while (begin_index != string::npos) {
+    end_index = full.find_first_of(delim, begin_index);
+    if (end_index == string::npos) {
+      *result++ = full.substr(begin_index);
+      return;
+    }
+    *result++ = full.substr(begin_index, (end_index - begin_index));
+    begin_index = full.find_first_not_of(delim, end_index);
+  }
+}
+
+void SplitStringUsing(const string& full,
+                      const char* delim,
+                      vector<string>* result) {
+  result->reserve(result->size() + CalculateReserveForVector(full, delim));
+  std::back_insert_iterator<vector<string>> it(*result);
+  SplitStringToIteratorUsing(full, delim, it);
+}
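+
+// For illustration, the delimiter handling documented in split.h: consecutive
+// delimiters are skipped, so no empty strings are produced.
+//
+//   std::vector<std::string> parts;
+//   SplitStringUsing("a,,b,", ",", &parts);
+//   // parts now contains {"a", "b"}.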
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/split.h b/internal/ceres/split.h
new file mode 100644
index 0000000..94b773d
--- /dev/null
+++ b/internal/ceres/split.h
@@ -0,0 +1,50 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#ifndef CERES_INTERNAL_SPLIT_H_
+#define CERES_INTERNAL_SPLIT_H_
+
+#include <string>
+#include <vector>
+#include "ceres/internal/port.h"
+
+namespace ceres {
+namespace internal {
+
+// Split a string using one or more character delimiters, presented as a
+// nul-terminated c string. Append the components to 'result'. If there are
+// consecutive delimiters, this function skips over all of them.
+void SplitStringUsing(const std::string& full, const char* delim,
+                      std::vector<std::string>* result);
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_SPLIT_H_
diff --git a/internal/ceres/stl_util.h b/internal/ceres/stl_util.h
new file mode 100644
index 0000000..0595a4c
--- /dev/null
+++ b/internal/ceres/stl_util.h
@@ -0,0 +1,91 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#ifndef CERES_INTERNAL_STL_UTIL_H_
+#define CERES_INTERNAL_STL_UTIL_H_
+
+#include <algorithm>
+
+namespace ceres {
+
+// STLDeleteContainerPointers()
+//  For a range within a container of pointers, calls delete
+//  (non-array version) on these pointers.
+// NOTE: for these three functions, we could just implement a DeleteObject
+// functor and then call for_each() on the range and functor, but this
+// requires us to pull in all of algorithm.h, which seems expensive.
+// For hash_[multi]set, it is important that this deletes behind the iterator
+// because the hash_set may call the hash function on the iterator when it is
+// advanced, which could result in the hash function trying to dereference a
+// stale pointer.
+template <class ForwardIterator>
+void STLDeleteContainerPointers(ForwardIterator begin,
+                                ForwardIterator end) {
+  while (begin != end) {
+    ForwardIterator temp = begin;
+    ++begin;
+    delete *temp;
+  }
+}
+
+// Variant of STLDeleteContainerPointers which allows the container to
+// contain duplicates.
+template <class ForwardIterator>
+void STLDeleteUniqueContainerPointers(ForwardIterator begin,
+                                      ForwardIterator end) {
+  sort(begin, end);
+  ForwardIterator new_end = unique(begin, end);
+  while (begin != new_end) {
+    ForwardIterator temp = begin;
+    ++begin;
+    delete *temp;
+  }
+}
+
+// STLDeleteElements() deletes all the elements in an STL container and clears
+// the container.  This function is suitable for use with a vector, set,
+// hash_set, or any other STL container which defines sensible begin(), end(),
+// and clear() methods.
+//
+// If container is NULL, this function is a no-op.
+template <class T>
+void STLDeleteElements(T *container) {
+  if (!container) return;
+  STLDeleteContainerPointers(container->begin(), container->end());
+  container->clear();
+}
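+
+// A minimal usage sketch for STLDeleteElements(); Foo is a hypothetical type,
+// and any STL container of raw pointers works the same way:
+//
+//   std::vector<Foo*> foos;
+//   foos.push_back(new Foo);
+//   foos.push_back(new Foo);
+//   STLDeleteElements(&foos);  // Deletes both Foo objects and clears foos.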
+
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_STL_UTIL_H_
diff --git a/internal/ceres/stringprintf.cc b/internal/ceres/stringprintf.cc
new file mode 100644
index 0000000..b3b7474
--- /dev/null
+++ b/internal/ceres/stringprintf.cc
@@ -0,0 +1,145 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Sanjay Ghemawat
+
+#include "ceres/stringprintf.h"
+
+#include <cerrno>
+#include <cstdarg>  // For va_list and related operations
+#include <cstdio>   // MSVC requires this for _vsnprintf
+#include <string>
+#include <vector>
+
+#include "ceres/internal/port.h"
+
+namespace ceres {
+namespace internal {
+
+using std::string;
+
+// va_copy() was defined in the C99 standard.  However, it did not appear in the
+// C++ standard until C++11.  This means that if Ceres is being compiled with a
+// strict pre-C++11 standard (e.g. -std=c++03), va_copy() will NOT be defined,
+// as we are using the C++ compiler (it would however be defined if we were
+// using the C compiler).  Note however that both GCC & Clang will in fact
+// define va_copy() when compiling for C++ if the C++ standard is not explicitly
+// specified (i.e. no -std=c++<XX> arg), even though it should not strictly be
+// defined unless -std=c++11 (or greater) was passed.
+#if !defined(va_copy)
+#if defined (__GNUC__)
+// On GCC/Clang, if va_copy() is not defined (C++ standard < C++11 explicitly
+// specified), use the internal __va_copy() version, which should be present
+// in even very old GCC versions.
+#define va_copy(d, s) __va_copy(d, s)
+#else
+// Some older versions of MSVC do not have va_copy(), in which case define it.
+// Although this is required for older MSVC versions, it should also work for
+// other non-GCC/Clang compilers which also do not define va_copy().
+#define va_copy(d, s) ((d) = (s))
+#endif  // defined (__GNUC__)
+#endif  // !defined(va_copy)
+
+void StringAppendV(string* dst, const char* format, va_list ap) {
+  // First try with a small fixed size buffer
+  char space[1024];
+
+  // It's possible for methods that use a va_list to invalidate
+  // the data in it upon use.  The fix is to make a copy
+  // of the structure before using it and use that copy instead.
+  va_list backup_ap;
+  va_copy(backup_ap, ap);
+  int result = vsnprintf(space, sizeof(space), format, backup_ap);
+  va_end(backup_ap);
+
+  // Cast avoids a signed/unsigned comparison for negative (error) results.
+  if (result < static_cast<int>(sizeof(space))) {
+    if (result >= 0) {
+      // Normal case -- everything fit.
+      dst->append(space, result);
+      return;
+    }
+
+#if defined (_MSC_VER)
+    // Error or MSVC running out of space.  MSVC 8.0 and higher
+    // can be asked about space needed with the special idiom below:
+    va_copy(backup_ap, ap);
+    result = vsnprintf(NULL, 0, format, backup_ap);
+    va_end(backup_ap);
+#endif
+
+    if (result < 0) {
+      // Just an error.
+      return;
+    }
+  }
+
+  // Increase the buffer size to the size requested by vsnprintf,
+  // plus one for the closing \0.
+  int length = result+1;
+  char* buf = new char[length];
+
+  // Restore the va_list before we use it again
+  va_copy(backup_ap, ap);
+  result = vsnprintf(buf, length, format, backup_ap);
+  va_end(backup_ap);
+
+  if (result >= 0 && result < length) {
+    // It fit
+    dst->append(buf, result);
+  }
+  delete[] buf;
+}
+
+string StringPrintf(const char* format, ...) {
+  va_list ap;
+  va_start(ap, format);
+  string result;
+  StringAppendV(&result, format, ap);
+  va_end(ap);
+  return result;
+}
+
+const string& SStringPrintf(string* dst, const char* format, ...) {
+  va_list ap;
+  va_start(ap, format);
+  dst->clear();
+  StringAppendV(dst, format, ap);
+  va_end(ap);
+  return *dst;
+}
+
+void StringAppendF(string* dst, const char* format, ...) {
+  va_list ap;
+  va_start(ap, format);
+  StringAppendV(dst, format, ap);
+  va_end(ap);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/stringprintf.h b/internal/ceres/stringprintf.h
new file mode 100644
index 0000000..feeb9c2
--- /dev/null
+++ b/internal/ceres/stringprintf.h
@@ -0,0 +1,89 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Sanjay Ghemawat
+//
+// Printf variants that place their output in a C++ string.
+//
+// Usage:
+//      string result = StringPrintf("%d %s\n", 10, "hello");
+//      SStringPrintf(&result, "%d %s\n", 10, "hello");
+//      StringAppendF(&result, "%d %s\n", 20, "there");
+
+#ifndef CERES_INTERNAL_STRINGPRINTF_H_
+#define CERES_INTERNAL_STRINGPRINTF_H_
+
+#include <cstdarg>
+#include <string>
+
+#include "ceres/internal/port.h"
+
+namespace ceres {
+namespace internal {
+
+#if (defined(__GNUC__) || defined(__clang__))
+// Tell the compiler to do printf format string checking if the compiler
+// supports it; see the 'format' attribute in
+// <http://gcc.gnu.org/onlinedocs/gcc-4.3.0/gcc/Function-Attributes.html>.
+//
+// N.B.: As the GCC manual states, "[s]ince non-static C++ methods
+// have an implicit 'this' argument, the arguments of such methods
+// should be counted from two, not one."
+#define CERES_PRINTF_ATTRIBUTE(string_index, first_to_check) \
+    __attribute__((__format__ (__printf__, string_index, first_to_check)))
+#define CERES_SCANF_ATTRIBUTE(string_index, first_to_check) \
+    __attribute__((__format__ (__scanf__, string_index, first_to_check)))
+#else
+#define CERES_PRINTF_ATTRIBUTE(string_index, first_to_check)
+#endif
+
+// Return a C++ string.
+extern std::string StringPrintf(const char* format, ...)
+    // Tell the compiler to do printf format string checking.
+    CERES_PRINTF_ATTRIBUTE(1, 2);
+
+// Store result into a supplied string and return it.
+extern const std::string& SStringPrintf(std::string* dst, const char* format, ...)
+    // Tell the compiler to do printf format string checking.
+    CERES_PRINTF_ATTRIBUTE(2, 3);
+
+// Append result to a supplied string.
+extern void StringAppendF(std::string* dst, const char* format, ...)
+    // Tell the compiler to do printf format string checking.
+    CERES_PRINTF_ATTRIBUTE(2, 3);
+
+// Lower-level routine that takes a va_list and appends to a specified string.
+// All other routines are just convenience wrappers around it.
+extern void StringAppendV(std::string* dst, const char* format, va_list ap);
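+
+// For example, StringAppendV can be used to build additional printf-style
+// helpers on top of this one (a sketch; AppendToLog is a hypothetical
+// function, written the same way StringAppendF is implemented):
+//
+//   void AppendToLog(std::string* log, const char* format, ...) {
+//     va_list ap;
+//     va_start(ap, format);
+//     StringAppendV(log, format, ap);
+//     va_end(ap);
+//   }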
+
+#undef CERES_PRINTF_ATTRIBUTE
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_STRINGPRINTF_H_
diff --git a/internal/ceres/subset_preconditioner.cc b/internal/ceres/subset_preconditioner.cc
new file mode 100644
index 0000000..865c5f1
--- /dev/null
+++ b/internal/ceres/subset_preconditioner.cc
@@ -0,0 +1,115 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/subset_preconditioner.h"
+
+#include <memory>
+#include <string>
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/inner_product_computer.h"
+#include "ceres/linear_solver.h"
+#include "ceres/sparse_cholesky.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+SubsetPreconditioner::SubsetPreconditioner(
+    const Preconditioner::Options& options, const BlockSparseMatrix& A)
+    : options_(options), num_cols_(A.num_cols()) {
+  CHECK_GE(options_.subset_preconditioner_start_row_block, 0);
+  LinearSolver::Options sparse_cholesky_options;
+  sparse_cholesky_options.sparse_linear_algebra_library_type =
+      options_.sparse_linear_algebra_library_type;
+  sparse_cholesky_options.use_postordering =
+      options_.use_postordering;
+  sparse_cholesky_ = SparseCholesky::Create(sparse_cholesky_options);
+}
+
+SubsetPreconditioner::~SubsetPreconditioner() {}
+
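+// Applying the preconditioner amounts to solving
+//
+//   (Q'Q + D'D) y = x   (or Q'Q y = x when D is NULL)
+//
+// using the Cholesky factorization computed in UpdateImpl().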
+void SubsetPreconditioner::RightMultiply(const double* x, double* y) const {
+  CHECK(x != nullptr);
+  CHECK(y != nullptr);
+  std::string message;
+  sparse_cholesky_->Solve(x, y, &message);
+}
+
+bool SubsetPreconditioner::UpdateImpl(const BlockSparseMatrix& A,
+                                      const double* D) {
+  BlockSparseMatrix* m = const_cast<BlockSparseMatrix*>(&A);
+  const CompressedRowBlockStructure* bs = m->block_structure();
+
+  // A = [P]
+  //     [Q]
+
+  // Now add D to A if needed.
+  if (D != NULL) {
+    // A = [P]
+    //     [Q]
+    //     [D]
+    std::unique_ptr<BlockSparseMatrix> regularizer(
+        BlockSparseMatrix::CreateDiagonalMatrix(D, bs->cols));
+    m->AppendRows(*regularizer);
+  }
+
+  if (inner_product_computer_.get() == NULL) {
+    inner_product_computer_.reset(InnerProductComputer::Create(
+        *m,
+        options_.subset_preconditioner_start_row_block,
+        bs->rows.size(),
+        sparse_cholesky_->StorageType()));
+  }
+
+  // Compute inner_product = [Q'*Q + D'*D]
+  inner_product_computer_->Compute();
+
+  // Unappend D if needed.
+  if (D != NULL) {
+    // A = [P]
+    //     [Q]
+    m->DeleteRowBlocks(bs->cols.size());
+  }
+
+  std::string message;
+  // Compute L s.t. LL' = Q'*Q + D'*D.
+  const LinearSolverTerminationType termination_type =
+      sparse_cholesky_->Factorize(inner_product_computer_->mutable_result(),
+                                  &message);
+  if (termination_type != LINEAR_SOLVER_SUCCESS) {
+    LOG(ERROR) << "Preconditioner factorization failed: " << message;
+    return false;
+  }
+
+  return true;
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/subset_preconditioner.h b/internal/ceres/subset_preconditioner.h
new file mode 100644
index 0000000..77c3d91
--- /dev/null
+++ b/internal/ceres/subset_preconditioner.h
@@ -0,0 +1,91 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_SUBSET_PRECONDITIONER_H_
+#define CERES_INTERNAL_SUBSET_PRECONDITIONER_H_
+
+#include <memory>
+#include "ceres/preconditioner.h"
+
+namespace ceres {
+namespace internal {
+
+class BlockSparseMatrix;
+class SparseCholesky;
+class InnerProductComputer;
+
+// Subset preconditioning uses a subset of the rows of the Jacobian
+// to construct a preconditioner for the normal equations.
+//
+// To keep the interface simple, we assume that the matrix A has
+// already been re-ordered so that the row blocks the user wishes to
+// use for the preconditioner are at the bottom of the matrix. This is
+// controlled by
+// Preconditioner::Options::subset_preconditioner_start_row_block.
+//
+// When using the subset preconditioner, all row blocks starting
+// from this row block are used to construct the preconditioner.
+//
+// More precisely, the matrix A is horizontally partitioned as
+//
+// A = [P]
+//     [Q]
+//
+// where P has subset_preconditioner_start_row_block row blocks,
+// and the preconditioner is the inverse of the matrix Q'Q.
+//
+// The smaller subset_preconditioner_start_row_block is, the larger Q
+// becomes, and the more accurate and computationally expensive the
+// preconditioner will be.
+//
+// See the tests for example usage.
+class SubsetPreconditioner : public BlockSparseMatrixPreconditioner {
+ public:
+  SubsetPreconditioner(const Preconditioner::Options& options,
+                       const BlockSparseMatrix& A);
+  virtual ~SubsetPreconditioner();
+
+  // Preconditioner interface
+  virtual void RightMultiply(const double* x, double* y) const;
+  virtual int num_rows() const { return num_cols_; }
+  virtual int num_cols() const { return num_cols_; }
+
+ private:
+  virtual bool UpdateImpl(const BlockSparseMatrix& A, const double* D);
+
+  const Preconditioner::Options options_;
+  const int num_cols_;
+  std::unique_ptr<SparseCholesky> sparse_cholesky_;
+  std::unique_ptr<InnerProductComputer> inner_product_computer_;
+};
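+
+// A usage sketch, modeled on subset_preconditioner_test.cc. The
+// BlockSparseMatrix jacobian, the array D and the vectors x, y are
+// assumed to have been set up by the caller:
+//
+//   Preconditioner::Options options;
+//   options.subset_preconditioner_start_row_block = 10;
+//   options.sparse_linear_algebra_library_type = SUITE_SPARSE;
+//   SubsetPreconditioner preconditioner(options, jacobian);
+//   preconditioner.Update(jacobian, D);
+//   preconditioner.RightMultiply(x, y);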
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_SUBSET_PRECONDITIONER_H_
diff --git a/internal/ceres/subset_preconditioner_test.cc b/internal/ceres/subset_preconditioner_test.cc
new file mode 100644
index 0000000..0285680
--- /dev/null
+++ b/internal/ceres/subset_preconditioner_test.cc
@@ -0,0 +1,202 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <limits>
+#include <memory>
+#include <sstream>
+#include <string>
+#include "ceres/subset_preconditioner.h"
+#include "Eigen/Dense"
+#include "Eigen/SparseCore"
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/inner_product_computer.h"
+#include "ceres/internal/eigen.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+// TODO(sameeragarwal): Refactor the following two functions out of
+// here and sparse_cholesky_test.cc into a more suitable place.
+template <int UpLoType>
+bool SolveLinearSystemUsingEigen(const Matrix& lhs,
+                                 const Vector& rhs,
+                                 Vector* solution) {
+  Eigen::LLT<Matrix, UpLoType> llt = lhs.selfadjointView<UpLoType>().llt();
+  if (llt.info() != Eigen::Success) {
+    return false;
+  }
+  *solution = llt.solve(rhs);
+  return (llt.info() == Eigen::Success);
+}
+
+// Use Eigen's Dense Cholesky solver to compute the solution to a
+// sparse linear system.
+bool ComputeExpectedSolution(const CompressedRowSparseMatrix& lhs,
+                             const Vector& rhs,
+                             Vector* solution) {
+  Matrix dense_triangular_lhs;
+  lhs.ToDenseMatrix(&dense_triangular_lhs);
+  if (lhs.storage_type() == CompressedRowSparseMatrix::UPPER_TRIANGULAR) {
+    Matrix full_lhs = dense_triangular_lhs.selfadjointView<Eigen::Upper>();
+    return SolveLinearSystemUsingEigen<Eigen::Upper>(full_lhs, rhs, solution);
+  }
+  return SolveLinearSystemUsingEigen<Eigen::Lower>(
+      dense_triangular_lhs, rhs, solution);
+}
+
+typedef ::testing::tuple<SparseLinearAlgebraLibraryType, bool> Param;
+
+std::string ParamInfoToString(testing::TestParamInfo<Param> info) {
+  Param param = info.param;
+  std::stringstream ss;
+  ss << SparseLinearAlgebraLibraryTypeToString(::testing::get<0>(param)) << "_"
+     << (::testing::get<1>(param) ? "Diagonal" : "NoDiagonal");
+  return ss.str();
+}
+
+class SubsetPreconditionerTest : public ::testing::TestWithParam<Param> {
+ protected:
+  virtual void SetUp() {
+    BlockSparseMatrix::RandomMatrixOptions options;
+    options.num_col_blocks = 4;
+    options.min_col_block_size = 1;
+    options.max_col_block_size = 4;
+    options.num_row_blocks = 8;
+    options.min_row_block_size = 1;
+    options.max_row_block_size = 4;
+    options.block_density = 0.9;
+
+    m_.reset(BlockSparseMatrix::CreateRandomMatrix(options));
+    start_row_block_ = m_->block_structure()->rows.size();
+
+    // Ensure that the bottom part of the matrix has the same column
+    // block structure.
+    options.col_blocks = m_->block_structure()->cols;
+    b_.reset(BlockSparseMatrix::CreateRandomMatrix(options));
+    m_->AppendRows(*b_);
+
+    // Create an identity block diagonal matrix with the same column
+    // block structure.
+    diagonal_ = Vector::Ones(m_->num_cols());
+    block_diagonal_.reset(BlockSparseMatrix::CreateDiagonalMatrix(
+        diagonal_.data(), b_->block_structure()->cols));
+
+    // Unconditionally add the block diagonal to the matrix b_,
+    // because it is either part of b_ (to make it full rank), or
+    // we pass the same diagonal matrix later as the parameter D. In
+    // either case the preconditioner matrix is b_'b_ + D'D.
+    b_->AppendRows(*block_diagonal_);
+    inner_product_computer_.reset(InnerProductComputer::Create(
+        *b_, CompressedRowSparseMatrix::UPPER_TRIANGULAR));
+    inner_product_computer_->Compute();
+  }
+
+  std::unique_ptr<BlockSparseMatrix> m_;
+  std::unique_ptr<BlockSparseMatrix> b_;
+  std::unique_ptr<BlockSparseMatrix> block_diagonal_;
+  std::unique_ptr<InnerProductComputer> inner_product_computer_;
+  std::unique_ptr<Preconditioner> preconditioner_;
+  Vector diagonal_;
+  int start_row_block_;
+};
+
+TEST_P(SubsetPreconditionerTest, foo) {
+  Param param = GetParam();
+  Preconditioner::Options options;
+  options.subset_preconditioner_start_row_block = start_row_block_;
+  options.sparse_linear_algebra_library_type = ::testing::get<0>(param);
+  preconditioner_.reset(new SubsetPreconditioner(options, *m_));
+
+  const bool with_diagonal = ::testing::get<1>(param);
+  if (!with_diagonal) {
+    m_->AppendRows(*block_diagonal_);
+  }
+
+  EXPECT_TRUE(
+      preconditioner_->Update(*m_, with_diagonal ? diagonal_.data() : NULL));
+
+  // Repeatedly apply the preconditioner to random vectors and check
+  // that the preconditioned value is the same as one obtained by
+  // solving the linear system directly.
+  for (int i = 0; i < 5; ++i) {
+    CompressedRowSparseMatrix* lhs = inner_product_computer_->mutable_result();
+    Vector rhs = Vector::Random(lhs->num_rows());
+    Vector expected(lhs->num_rows());
+    EXPECT_TRUE(ComputeExpectedSolution(*lhs, rhs, &expected));
+
+    Vector actual(lhs->num_rows());
+    preconditioner_->RightMultiply(rhs.data(), actual.data());
+
+    Matrix eigen_lhs;
+    lhs->ToDenseMatrix(&eigen_lhs);
+    EXPECT_NEAR((actual - expected).norm() / actual.norm(),
+                0.0,
+                std::numeric_limits<double>::epsilon() * 10)
+        << "\n"
+        << eigen_lhs << "\n"
+        << expected.transpose() << "\n"
+        << actual.transpose();
+  }
+}
+
+#ifndef CERES_NO_SUITESPARSE
+INSTANTIATE_TEST_CASE_P(SubsetPreconditionerWithSuiteSparse,
+                        SubsetPreconditionerTest,
+                        ::testing::Combine(::testing::Values(SUITE_SPARSE),
+                                           ::testing::Values(true, false)),
+                        ParamInfoToString);
+#endif
+
+#ifndef CERES_NO_CXSPARSE
+INSTANTIATE_TEST_CASE_P(SubsetPreconditionerWithCXSparse,
+                        SubsetPreconditionerTest,
+                        ::testing::Combine(::testing::Values(CX_SPARSE),
+                                           ::testing::Values(true, false)),
+                        ParamInfoToString);
+#endif
+
+#ifndef CERES_NO_ACCELERATE_SPARSE
+INSTANTIATE_TEST_CASE_P(SubsetPreconditionerWithAccelerateSparse,
+                        SubsetPreconditionerTest,
+                        ::testing::Combine(::testing::Values(ACCELERATE_SPARSE),
+                                           ::testing::Values(true, false)),
+                        ParamInfoToString);
+#endif
+
+#ifdef CERES_USE_EIGEN_SPARSE
+INSTANTIATE_TEST_CASE_P(SubsetPreconditionerWithEigenSparse,
+                        SubsetPreconditionerTest,
+                        ::testing::Combine(::testing::Values(EIGEN_SPARSE),
+                                           ::testing::Values(true, false)),
+                        ParamInfoToString);
+#endif
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/suitesparse.cc b/internal/ceres/suitesparse.cc
new file mode 100644
index 0000000..190d175
--- /dev/null
+++ b/internal/ceres/suitesparse.cc
@@ -0,0 +1,430 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_NO_SUITESPARSE
+#include "ceres/suitesparse.h"
+
+#include <cstring>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "ceres/compressed_col_sparse_matrix_utils.h"
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/linear_solver.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "cholmod.h"
+
+namespace ceres {
+namespace internal {
+
+using std::string;
+using std::vector;
+
+SuiteSparse::SuiteSparse() { cholmod_start(&cc_); }
+
+SuiteSparse::~SuiteSparse() { cholmod_finish(&cc_); }
+
+cholmod_sparse* SuiteSparse::CreateSparseMatrix(TripletSparseMatrix* A) {
+  cholmod_triplet triplet;
+
+  triplet.nrow = A->num_rows();
+  triplet.ncol = A->num_cols();
+  triplet.nzmax = A->max_num_nonzeros();
+  triplet.nnz = A->num_nonzeros();
+  triplet.i = reinterpret_cast<void*>(A->mutable_rows());
+  triplet.j = reinterpret_cast<void*>(A->mutable_cols());
+  triplet.x = reinterpret_cast<void*>(A->mutable_values());
+  triplet.stype = 0;  // Matrix is not symmetric.
+  triplet.itype = CHOLMOD_INT;
+  triplet.xtype = CHOLMOD_REAL;
+  triplet.dtype = CHOLMOD_DOUBLE;
+
+  return cholmod_triplet_to_sparse(&triplet, triplet.nnz, &cc_);
+}
+
+cholmod_sparse* SuiteSparse::CreateSparseMatrixTranspose(
+    TripletSparseMatrix* A) {
+  cholmod_triplet triplet;
+
+  triplet.ncol = A->num_rows();  // swap row and columns
+  triplet.nrow = A->num_cols();
+  triplet.nzmax = A->max_num_nonzeros();
+  triplet.nnz = A->num_nonzeros();
+
+  // swap rows and columns
+  triplet.j = reinterpret_cast<void*>(A->mutable_rows());
+  triplet.i = reinterpret_cast<void*>(A->mutable_cols());
+  triplet.x = reinterpret_cast<void*>(A->mutable_values());
+  triplet.stype = 0;  // Matrix is not symmetric.
+  triplet.itype = CHOLMOD_INT;
+  triplet.xtype = CHOLMOD_REAL;
+  triplet.dtype = CHOLMOD_DOUBLE;
+
+  return cholmod_triplet_to_sparse(&triplet, triplet.nnz, &cc_);
+}
+
+cholmod_sparse SuiteSparse::CreateSparseMatrixTransposeView(
+    CompressedRowSparseMatrix* A) {
+  cholmod_sparse m;
+  m.nrow = A->num_cols();
+  m.ncol = A->num_rows();
+  m.nzmax = A->num_nonzeros();
+  m.nz = nullptr;
+  m.p = reinterpret_cast<void*>(A->mutable_rows());
+  m.i = reinterpret_cast<void*>(A->mutable_cols());
+  m.x = reinterpret_cast<void*>(A->mutable_values());
+  m.z = nullptr;
+
+  if (A->storage_type() == CompressedRowSparseMatrix::LOWER_TRIANGULAR) {
+    m.stype = 1;
+  } else if (A->storage_type() == CompressedRowSparseMatrix::UPPER_TRIANGULAR) {
+    m.stype = -1;
+  } else {
+    m.stype = 0;
+  }
+
+  m.itype = CHOLMOD_INT;
+  m.xtype = CHOLMOD_REAL;
+  m.dtype = CHOLMOD_DOUBLE;
+  m.sorted = 1;
+  m.packed = 1;
+
+  return m;
+}
+
+cholmod_dense SuiteSparse::CreateDenseVectorView(const double* x, int size) {
+  cholmod_dense v;
+  v.nrow = size;
+  v.ncol = 1;
+  v.nzmax = size;
+  v.d = size;
+  v.x = const_cast<void*>(reinterpret_cast<const void*>(x));
+  v.xtype = CHOLMOD_REAL;
+  v.dtype = CHOLMOD_DOUBLE;
+  return v;
+}
+
+cholmod_dense* SuiteSparse::CreateDenseVector(const double* x,
+                                              int in_size,
+                                              int out_size) {
+  CHECK_LE(in_size, out_size);
+  cholmod_dense* v = cholmod_zeros(out_size, 1, CHOLMOD_REAL, &cc_);
+  if (x != nullptr) {
+    memcpy(v->x, x, in_size * sizeof(*x));
+  }
+  return v;
+}
+
+cholmod_factor* SuiteSparse::AnalyzeCholesky(cholmod_sparse* A,
+                                             string* message) {
+  // CHOLMOD can try multiple re-ordering strategies to find a fill
+  // reducing ordering. Here we just tell it to use AMD with an
+  // automatic, matrix-dependent choice of supernodal versus
+  // simplicial factorization.
+  cc_.nmethods = 1;
+  cc_.method[0].ordering = CHOLMOD_AMD;
+  cc_.supernodal = CHOLMOD_AUTO;
+
+  cholmod_factor* factor = cholmod_analyze(A, &cc_);
+  if (VLOG_IS_ON(2)) {
+    cholmod_print_common(const_cast<char*>("Symbolic Analysis"), &cc_);
+  }
+
+  if (cc_.status != CHOLMOD_OK) {
+    *message =
+        StringPrintf("cholmod_analyze failed. error code: %d", cc_.status);
+    return nullptr;
+  }
+
+  CHECK(factor != nullptr);
+  return factor;
+}
+
+cholmod_factor* SuiteSparse::BlockAnalyzeCholesky(cholmod_sparse* A,
+                                                  const vector<int>& row_blocks,
+                                                  const vector<int>& col_blocks,
+                                                  string* message) {
+  vector<int> ordering;
+  if (!BlockAMDOrdering(A, row_blocks, col_blocks, &ordering)) {
+    return nullptr;
+  }
+  return AnalyzeCholeskyWithUserOrdering(A, ordering, message);
+}
+
+cholmod_factor* SuiteSparse::AnalyzeCholeskyWithUserOrdering(
+    cholmod_sparse* A, const vector<int>& ordering, string* message) {
+  CHECK_EQ(ordering.size(), A->nrow);
+
+  cc_.nmethods = 1;
+  cc_.method[0].ordering = CHOLMOD_GIVEN;
+
+  cholmod_factor* factor =
+      cholmod_analyze_p(A, const_cast<int*>(&ordering[0]), nullptr, 0, &cc_);
+  if (VLOG_IS_ON(2)) {
+    cholmod_print_common(const_cast<char*>("Symbolic Analysis"), &cc_);
+  }
+  if (cc_.status != CHOLMOD_OK) {
+    *message =
+        StringPrintf("cholmod_analyze failed. error code: %d", cc_.status);
+    return nullptr;
+  }
+
+  CHECK(factor != nullptr);
+  return factor;
+}
+
+cholmod_factor* SuiteSparse::AnalyzeCholeskyWithNaturalOrdering(
+    cholmod_sparse* A, string* message) {
+  cc_.nmethods = 1;
+  cc_.method[0].ordering = CHOLMOD_NATURAL;
+  cc_.postorder = 0;
+
+  cholmod_factor* factor = cholmod_analyze(A, &cc_);
+  if (VLOG_IS_ON(2)) {
+    cholmod_print_common(const_cast<char*>("Symbolic Analysis"), &cc_);
+  }
+  if (cc_.status != CHOLMOD_OK) {
+    *message =
+        StringPrintf("cholmod_analyze failed. error code: %d", cc_.status);
+    return nullptr;
+  }
+
+  CHECK(factor != nullptr);
+  return factor;
+}
+
+bool SuiteSparse::BlockAMDOrdering(const cholmod_sparse* A,
+                                   const vector<int>& row_blocks,
+                                   const vector<int>& col_blocks,
+                                   vector<int>* ordering) {
+  const int num_row_blocks = row_blocks.size();
+  const int num_col_blocks = col_blocks.size();
+
+  // Arrays storing the compressed column structure of the matrix
+  // encoding the block sparsity of A.
+  vector<int> block_cols;
+  vector<int> block_rows;
+
+  CompressedColumnScalarMatrixToBlockMatrix(reinterpret_cast<const int*>(A->i),
+                                            reinterpret_cast<const int*>(A->p),
+                                            row_blocks,
+                                            col_blocks,
+                                            &block_rows,
+                                            &block_cols);
+  cholmod_sparse_struct block_matrix;
+  block_matrix.nrow = num_row_blocks;
+  block_matrix.ncol = num_col_blocks;
+  block_matrix.nzmax = block_rows.size();
+  block_matrix.p = reinterpret_cast<void*>(&block_cols[0]);
+  block_matrix.i = reinterpret_cast<void*>(&block_rows[0]);
+  block_matrix.x = nullptr;
+  block_matrix.stype = A->stype;
+  block_matrix.itype = CHOLMOD_INT;
+  block_matrix.xtype = CHOLMOD_PATTERN;
+  block_matrix.dtype = CHOLMOD_DOUBLE;
+  block_matrix.sorted = 1;
+  block_matrix.packed = 1;
+
+  vector<int> block_ordering(num_row_blocks);
+  if (!cholmod_amd(&block_matrix, nullptr, 0, &block_ordering[0], &cc_)) {
+    return false;
+  }
+
+  BlockOrderingToScalarOrdering(row_blocks, block_ordering, ordering);
+  return true;
+}
+
+LinearSolverTerminationType SuiteSparse::Cholesky(cholmod_sparse* A,
+                                                  cholmod_factor* L,
+                                                  string* message) {
+  CHECK(A != nullptr);
+  CHECK(L != nullptr);
+
+  // Save the current print level and silence CHOLMOD, otherwise
+  // CHOLMOD is prone to dumping stuff to stderr, which can be
+  // distracting when the error (matrix is indefinite) is not a fatal
+  // failure.
+  const int old_print_level = cc_.print;
+  cc_.print = 0;
+
+  cc_.quick_return_if_not_posdef = 1;
+  int cholmod_status = cholmod_factorize(A, L, &cc_);
+  cc_.print = old_print_level;
+
+  switch (cc_.status) {
+    case CHOLMOD_NOT_INSTALLED:
+      *message = "CHOLMOD failure: Method not installed.";
+      return LINEAR_SOLVER_FATAL_ERROR;
+    case CHOLMOD_OUT_OF_MEMORY:
+      *message = "CHOLMOD failure: Out of memory.";
+      return LINEAR_SOLVER_FATAL_ERROR;
+    case CHOLMOD_TOO_LARGE:
+      *message = "CHOLMOD failure: Integer overflow occurred.";
+      return LINEAR_SOLVER_FATAL_ERROR;
+    case CHOLMOD_INVALID:
+      *message = "CHOLMOD failure: Invalid input.";
+      return LINEAR_SOLVER_FATAL_ERROR;
+    case CHOLMOD_NOT_POSDEF:
+      *message = "CHOLMOD warning: Matrix not positive definite.";
+      return LINEAR_SOLVER_FAILURE;
+    case CHOLMOD_DSMALL:
+      *message =
+          "CHOLMOD warning: D for LDL' or diag(L) or "
+          "LL' has tiny absolute value.";
+      return LINEAR_SOLVER_FAILURE;
+    case CHOLMOD_OK:
+      if (cholmod_status != 0) {
+        return LINEAR_SOLVER_SUCCESS;
+      }
+
+      *message =
+          "CHOLMOD failure: cholmod_factorize returned false "
+          "but cholmod_common::status is CHOLMOD_OK. "
+          "Please report this to ceres-solver@googlegroups.com.";
+      return LINEAR_SOLVER_FATAL_ERROR;
+    default:
+      *message = StringPrintf(
+          "Unknown cholmod return code: %d. "
+          "Please report this to ceres-solver@googlegroups.com.",
+          cc_.status);
+      return LINEAR_SOLVER_FATAL_ERROR;
+  }
+
+  return LINEAR_SOLVER_FATAL_ERROR;
+}
+
+cholmod_dense* SuiteSparse::Solve(cholmod_factor* L,
+                                  cholmod_dense* b,
+                                  string* message) {
+  if (cc_.status != CHOLMOD_OK) {
+    *message = "cholmod_solve failed. CHOLMOD status is not CHOLMOD_OK";
+    return nullptr;
+  }
+
+  return cholmod_solve(CHOLMOD_A, L, b, &cc_);
+}
+
+bool SuiteSparse::ApproximateMinimumDegreeOrdering(cholmod_sparse* matrix,
+                                                   int* ordering) {
+  return cholmod_amd(matrix, nullptr, 0, ordering, &cc_);
+}
+
+bool SuiteSparse::ConstrainedApproximateMinimumDegreeOrdering(
+    cholmod_sparse* matrix, int* constraints, int* ordering) {
+#ifndef CERES_NO_CAMD
+  return cholmod_camd(matrix, nullptr, 0, constraints, ordering, &cc_);
+#else
+  LOG(FATAL) << "Congratulations you have found a bug in Ceres."
+             << "Ceres Solver was compiled with SuiteSparse "
+             << "version 4.1.0 or less. Calling this function "
+             << "in that case is a bug. Please contact the"
+             << "the Ceres Solver developers.";
+  return false;
+#endif
+}
+
+std::unique_ptr<SparseCholesky> SuiteSparseCholesky::Create(
+    const OrderingType ordering_type) {
+  return std::unique_ptr<SparseCholesky>(new SuiteSparseCholesky(ordering_type));
+}
+
+SuiteSparseCholesky::SuiteSparseCholesky(const OrderingType ordering_type)
+    : ordering_type_(ordering_type), factor_(nullptr) {}
+
+SuiteSparseCholesky::~SuiteSparseCholesky() {
+  if (factor_ != nullptr) {
+    ss_.Free(factor_);
+  }
+}
+
+LinearSolverTerminationType SuiteSparseCholesky::Factorize(
+    CompressedRowSparseMatrix* lhs, string* message) {
+  if (lhs == nullptr) {
+    *message = "Failure: Input lhs is NULL.";
+    return LINEAR_SOLVER_FATAL_ERROR;
+  }
+
+  cholmod_sparse cholmod_lhs = ss_.CreateSparseMatrixTransposeView(lhs);
+
+  if (factor_ == nullptr) {
+    if (ordering_type_ == NATURAL) {
+      factor_ = ss_.AnalyzeCholeskyWithNaturalOrdering(&cholmod_lhs, message);
+    } else {
+      if (!lhs->col_blocks().empty() && !(lhs->row_blocks().empty())) {
+        factor_ = ss_.BlockAnalyzeCholesky(
+            &cholmod_lhs, lhs->col_blocks(), lhs->row_blocks(), message);
+      } else {
+        factor_ = ss_.AnalyzeCholesky(&cholmod_lhs, message);
+      }
+    }
+
+    if (factor_ == nullptr) {
+      return LINEAR_SOLVER_FATAL_ERROR;
+    }
+  }
+
+  return ss_.Cholesky(&cholmod_lhs, factor_, message);
+}
+
+CompressedRowSparseMatrix::StorageType SuiteSparseCholesky::StorageType()
+    const {
+  return ((ordering_type_ == NATURAL)
+              ? CompressedRowSparseMatrix::UPPER_TRIANGULAR
+              : CompressedRowSparseMatrix::LOWER_TRIANGULAR);
+}
+
+LinearSolverTerminationType SuiteSparseCholesky::Solve(const double* rhs,
+                                                       double* solution,
+                                                       string* message) {
+  // Error checking
+  if (factor_ == nullptr) {
+    *message = "Solve called without a call to Factorize first.";
+    return LINEAR_SOLVER_FATAL_ERROR;
+  }
+
+  const int num_cols = factor_->n;
+  cholmod_dense cholmod_rhs = ss_.CreateDenseVectorView(rhs, num_cols);
+  cholmod_dense* cholmod_dense_solution =
+      ss_.Solve(factor_, &cholmod_rhs, message);
+
+  if (cholmod_dense_solution == nullptr) {
+    return LINEAR_SOLVER_FAILURE;
+  }
+
+  memcpy(solution, cholmod_dense_solution->x, num_cols * sizeof(*solution));
+  ss_.Free(cholmod_dense_solution);
+  return LINEAR_SOLVER_SUCCESS;
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_SUITESPARSE
diff --git a/internal/ceres/suitesparse.h b/internal/ceres/suitesparse.h
new file mode 100644
index 0000000..7770d9e
--- /dev/null
+++ b/internal/ceres/suitesparse.h
@@ -0,0 +1,338 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// A simple C++ interface to the SuiteSparse and CHOLMOD libraries.
+
+#ifndef CERES_INTERNAL_SUITESPARSE_H_
+#define CERES_INTERNAL_SUITESPARSE_H_
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_NO_SUITESPARSE
+
+#include <cstring>
+#include <memory>
+#include <string>
+#include <vector>
+#include "SuiteSparseQR.hpp"
+#include "ceres/linear_solver.h"
+#include "ceres/sparse_cholesky.h"
+#include "cholmod.h"
+#include "glog/logging.h"
+
+// Before SuiteSparse version 4.2.0, cholmod_camd was only enabled
+// if SuiteSparse was compiled with Metis support. This makes
+// calling and linking into cholmod_camd problematic even though it
+// has nothing to do with Metis. This has been fixed reliably in
+// 4.2.0.
+//
+// The fix was actually committed in 4.1.0, but there is
+// some confusion about a silent update to the tar ball, so we are
+// being conservative and choosing the next minor version where
+// things are stable.
+#if (SUITESPARSE_VERSION < 4002)
+#define CERES_NO_CAMD
+#endif
+
+// UF_long is deprecated but SuiteSparse_long is only available in
+// newer versions of SuiteSparse. So for older versions of
+// SuiteSparse, we define SuiteSparse_long to be the same as UF_long,
+// which is what recent versions of SuiteSparse do anyways.
+#ifndef SuiteSparse_long
+#define SuiteSparse_long UF_long
+#endif
+
+namespace ceres {
+namespace internal {
+
+class CompressedRowSparseMatrix;
+class TripletSparseMatrix;
+
+// The raw CHOLMOD and SuiteSparseQR libraries have a slightly
+// cumbersome c like calling format. This object abstracts it away and
+// provides the user with a simpler interface. The methods here cannot
+// be static as a cholmod_common object serves as a global variable
+// for all cholmod function calls.
+class SuiteSparse {
+ public:
+  SuiteSparse();
+  ~SuiteSparse();
+
+  // Functions for building cholmod_sparse objects from sparse
+  // matrices stored in triplet form. The matrix A is not
+  // modified. Caller owns the result.
+  cholmod_sparse* CreateSparseMatrix(TripletSparseMatrix* A);
+
+  // This function works like CreateSparseMatrix, except that the
+  // return value corresponds to A' rather than A.
+  cholmod_sparse* CreateSparseMatrixTranspose(TripletSparseMatrix* A);
+
+  // Create a cholmod_sparse wrapper around the contents of A. This is
+  // a shallow object, which refers to the contents of A and does not
+  // use the SuiteSparse machinery to allocate memory.
+  cholmod_sparse CreateSparseMatrixTransposeView(CompressedRowSparseMatrix* A);
+
+  // Create a cholmod_dense vector around the contents of the array x.
+  // This is a shallow object, which refers to the contents of x and
+  // does not use the SuiteSparse machinery to allocate memory.
+  cholmod_dense CreateDenseVectorView(const double* x, int size);
+
+  // Given a vector x, build a cholmod_dense vector of size out_size
+  // with the first in_size entries copied from x. If x is NULL, then
+  // an all zeros vector is returned. Caller owns the result.
+  cholmod_dense* CreateDenseVector(const double* x, int in_size, int out_size);
+
+  // The matrix A is scaled using the matrix whose diagonal is the
+  // vector scale. mode describes how scaling is applied. Possible
+  // values are CHOLMOD_ROW for row scaling - diag(scale) * A,
+  // CHOLMOD_COL for column scaling - A * diag(scale) and CHOLMOD_SYM
+  // for symmetric scaling which scales both the rows and the columns
+  // - diag(scale) * A * diag(scale).
+  void Scale(cholmod_dense* scale, int mode, cholmod_sparse* A) {
+     cholmod_scale(scale, mode, A, &cc_);
+  }
+
+  // Create and return a matrix m = A * A'. Caller owns the
+  // result. The matrix A is not modified.
+  cholmod_sparse* AATranspose(cholmod_sparse* A) {
+    cholmod_sparse* m = cholmod_aat(A, NULL, A->nrow, 1, &cc_);
+    m->stype = 1;  // Pay attention to the upper triangular part.
+    return m;
+  }
+
+  // y = alpha * A * x + beta * y. Only y is modified.
+  void SparseDenseMultiply(cholmod_sparse* A, double alpha, double beta,
+                           cholmod_dense* x, cholmod_dense* y) {
+    double alpha_[2] = {alpha, 0};
+    double beta_[2] = {beta, 0};
+    cholmod_sdmult(A, 0, alpha_, beta_, x, y, &cc_);
+  }
+
+  // Find an ordering of A or AA' (if A is unsymmetric) that minimizes
+  // the fill-in in the Cholesky factorization of the corresponding
+  // matrix. This is done by using the AMD algorithm.
+  //
+  // Using this ordering, the symbolic Cholesky factorization of A (or
+  // AA') is computed and returned.
+  //
+  // A is not modified, only the pattern of non-zeros of A is used,
+  // the actual numerical values in A are of no consequence.
+  //
+  // message contains an explanation of the failures if any.
+  //
+  // Caller owns the result.
+  cholmod_factor* AnalyzeCholesky(cholmod_sparse* A, std::string* message);
+
+  cholmod_factor* BlockAnalyzeCholesky(cholmod_sparse* A,
+                                       const std::vector<int>& row_blocks,
+                                       const std::vector<int>& col_blocks,
+                                       std::string* message);
+
+  // If A is symmetric, then compute the symbolic Cholesky
+  // factorization of A(ordering, ordering). If A is unsymmetric, then
+  // compute the symbolic factorization of
+  // A(ordering,:) A(ordering,:)'.
+  //
+  // A is not modified, only the pattern of non-zeros of A is used,
+  // the actual numerical values in A are of no consequence.
+  //
+  // message contains an explanation of the failures if any.
+  //
+  // Caller owns the result.
+  cholmod_factor* AnalyzeCholeskyWithUserOrdering(
+      cholmod_sparse* A,
+      const std::vector<int>& ordering,
+      std::string* message);
+
+  // Perform a symbolic factorization of A without re-ordering A. No
+  // postordering of the elimination tree is performed. This ensures
+  // that the symbolic factor does not introduce an extra permutation
+  // on the matrix. See the documentation for CHOLMOD for more details.
+  //
+  // message contains an explanation of the failures if any.
+  cholmod_factor* AnalyzeCholeskyWithNaturalOrdering(cholmod_sparse* A,
+                                                     std::string* message);
+
+  // Use the symbolic factorization in L, to find the numerical
+  // factorization for the matrix A or AA^T. Return true if
+  // successful, false otherwise. L contains the numeric factorization
+  // on return.
+  //
+  // message contains an explanation of the failures if any.
+  LinearSolverTerminationType Cholesky(cholmod_sparse* A,
+                                       cholmod_factor* L,
+                                       std::string* message);
+
+  // Given a Cholesky factorization of a matrix A = LL^T, solve the
+  // linear system Ax = b, and return the result. If the Solve fails
+  // NULL is returned. Caller owns the result.
+  //
+  // message contains an explanation of the failures if any.
+  cholmod_dense* Solve(cholmod_factor* L, cholmod_dense* b, std::string* message);
+
+  // By virtue of the modeling layer in Ceres being block oriented,
+  // all the matrices used by Ceres are also block oriented. When
+  // doing sparse direct factorization of these matrices the
+  // fill-reducing ordering algorithms (in particular AMD) can either
+  // be run on the block or the scalar form of these matrices. The two
+  // SuiteSparse::AnalyzeCholesky methods allow the client to
+  // compute the symbolic factorization of a matrix by either using
+  // AMD on the matrix or a user-provided ordering of the rows.
+  //
+  // But since the underlying matrices are block oriented, it is worth
+  // running AMD on just the block structure of these matrices and then
+  // lifting these block orderings to a full scalar ordering. This
+  // preserves the block structure of the permuted matrix, and exposes
+  // more of the super-nodal structure of the matrix to the numerical
+  // factorization routines.
+  //
+  // Find the block oriented AMD ordering of a matrix A, whose row and
+  // column blocks are given by row_blocks, and col_blocks
+  // respectively. The matrix may or may not be symmetric. The entries
+  // of col_blocks do not need to sum to the number of columns in
+  // A. If this is the case, only the first sum(col_blocks) columns
+  // are used to compute the ordering.
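+  //
+  // For example (roughly speaking), if row_blocks = {2, 3} and AMD orders
+  // block 1 before block 0, the lifted scalar ordering is {2, 3, 4, 0, 1},
+  // i.e. the scalar rows of each block stay contiguous and in their
+  // original relative order.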
+  bool BlockAMDOrdering(const cholmod_sparse* A,
+                        const std::vector<int>& row_blocks,
+                        const std::vector<int>& col_blocks,
+                        std::vector<int>* ordering);
+
+  // Find a fill reducing approximate minimum degree
+  // ordering. ordering is expected to be large enough to hold the
+  // ordering.
+  bool ApproximateMinimumDegreeOrdering(cholmod_sparse* matrix, int* ordering);
+
+
+  // Before SuiteSparse version 4.2.0, cholmod_camd was only enabled
+  // if SuiteSparse was compiled with Metis support. This makes
+  // calling and linking into cholmod_camd problematic even though it
+  // has nothing to do with Metis. This has been fixed reliably in
+  // 4.2.0.
+  //
+  // The fix was actually committed in 4.1.0, but there is
+  // some confusion about a silent update to the tar ball, so we are
+  // being conservative and choosing the next minor version where
+  // things are stable.
+  static bool IsConstrainedApproximateMinimumDegreeOrderingAvailable() {
+    return (SUITESPARSE_VERSION > 4001);
+  }
+
+  // Find a fill reducing approximate minimum degree
+  // ordering. constraints is an array which associates with each
+  // column of the matrix an elimination group. i.e., all columns in
+  // group 0 are eliminated first, all columns in group 1 are
+  // eliminated next etc. This function finds a fill reducing ordering
+  // that obeys these constraints.
+  //
+  // Calling ApproximateMinimumDegreeOrdering is equivalent to calling
+  // ConstrainedApproximateMinimumDegreeOrdering with a constraint
+  // array that puts all columns in the same elimination group.
+  //
+  // If CERES_NO_CAMD is defined then calling this function will
+  // result in a crash.
+  bool ConstrainedApproximateMinimumDegreeOrdering(cholmod_sparse* matrix,
+                                                   int* constraints,
+                                                   int* ordering);
+
+  void Free(cholmod_sparse* m) { cholmod_free_sparse(&m, &cc_); }
+  void Free(cholmod_dense* m)  { cholmod_free_dense(&m, &cc_);  }
+  void Free(cholmod_factor* m) { cholmod_free_factor(&m, &cc_); }
+
+  void Print(cholmod_sparse* m, const std::string& name) {
+    cholmod_print_sparse(m, const_cast<char*>(name.c_str()), &cc_);
+  }
+
+  void Print(cholmod_dense* m, const std::string& name) {
+    cholmod_print_dense(m, const_cast<char*>(name.c_str()), &cc_);
+  }
+
+  void Print(cholmod_triplet* m, const std::string& name) {
+    cholmod_print_triplet(m, const_cast<char*>(name.c_str()), &cc_);
+  }
+
+  cholmod_common* mutable_cc() { return &cc_; }
+
+ private:
+  cholmod_common cc_;
+};
+
+class SuiteSparseCholesky : public SparseCholesky {
+ public:
+  static std::unique_ptr<SparseCholesky> Create(
+      OrderingType ordering_type);
+
+  // SparseCholesky interface.
+  virtual ~SuiteSparseCholesky();
+  virtual CompressedRowSparseMatrix::StorageType StorageType() const;
+  virtual LinearSolverTerminationType Factorize(
+      CompressedRowSparseMatrix* lhs, std::string* message);
+  virtual LinearSolverTerminationType Solve(const double* rhs,
+                                            double* solution,
+                                            std::string* message);
+ private:
+  SuiteSparseCholesky(const OrderingType ordering_type);
+
+  const OrderingType ordering_type_;
+  SuiteSparse ss_;
+  cholmod_factor* factor_;
+};
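+
+// A usage sketch (assuming lhs is a CompressedRowSparseMatrix holding the
+// system matrix in the storage type reported by StorageType(), rhs and
+// solution are caller-owned arrays, and AMD is the fill-reducing
+// OrderingType):
+//
+//   std::unique_ptr<SparseCholesky> cholesky =
+//       SuiteSparseCholesky::Create(AMD);
+//   std::string message;
+//   if (cholesky->Factorize(lhs, &message) == LINEAR_SOLVER_SUCCESS &&
+//       cholesky->Solve(rhs, solution, &message) == LINEAR_SOLVER_SUCCESS) {
+//     // solution now holds the result of solving lhs * solution = rhs.
+//   }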
+
+}  // namespace internal
+}  // namespace ceres
+
+#else  // CERES_NO_SUITESPARSE
+
+typedef void cholmod_factor;
+
+namespace ceres {
+namespace internal {
+
+class SuiteSparse {
+ public:
+  // Defining this static function even when SuiteSparse is not
+  // available, allows client code to check for the presence of CAMD
+  // without checking for the absence of the CERES_NO_CAMD symbol.
+  //
+  // This is safer because the symbol may be missing due to a user
+  // accidentally not including suitesparse.h in their code when
+  // checking for the symbol.
+  static bool IsConstrainedApproximateMinimumDegreeOrderingAvailable() {
+    return false;
+  }
+
+  void Free(void* arg) {}
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_NO_SUITESPARSE
+
+#endif  // CERES_INTERNAL_SUITESPARSE_H_
diff --git a/internal/ceres/system_test.cc b/internal/ceres/system_test.cc
new file mode 100644
index 0000000..3f635d0
--- /dev/null
+++ b/internal/ceres/system_test.cc
@@ -0,0 +1,227 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//         sameeragarwal@google.com (Sameer Agarwal)
+//
+// End-to-end tests for Ceres using Powell's function.
+
+#include <cmath>
+#include <cstdlib>
+
+#include "ceres/autodiff_cost_function.h"
+#include "ceres/problem.h"
+#include "ceres/solver.h"
+#include "ceres/test_util.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+// This class implements the SystemTestProblem interface and provides
+// access to an implementation of Powell's singular function.
+//
+//   F = 1/2 (f1^2 + f2^2 + f3^2 + f4^2)
+//
+//   f1 = x1 + 10*x2;
+//   f2 = sqrt(5) * (x3 - x4)
+//   f3 = (x2 - 2*x3)^2
+//   f4 = sqrt(10) * (x1 - x4)^2
+//
+// The starting values are x1 = 3, x2 = -1, x3 = 0, x4 = 1.
+// The minimum is 0 at (x1, x2, x3, x4) = 0.
+//
+// From: Testing Unconstrained Optimization Software by Jorge J. More, Burton S.
+// Garbow and Kenneth E. Hillstrom in ACM Transactions on Mathematical Software,
+// Vol 7(1), March 1981.
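+//
+// As a quick sanity check, at the starting point (3, -1, 0, 1):
+//
+//   f1 = 3 + 10*(-1)        = -7
+//   f2 = sqrt(5)*(0 - 1)    = -sqrt(5)
+//   f3 = (-1 - 2*0)^2       = 1
+//   f4 = sqrt(10)*(3 - 1)^2 = 4*sqrt(10)
+//
+// so the initial cost is F = 1/2*(49 + 5 + 1 + 160) = 107.5.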
+class PowellsFunction {
+ public:
+  PowellsFunction() {
+    x_[0] =  3.0;
+    x_[1] = -1.0;
+    x_[2] =  0.0;
+    x_[3] =  1.0;
+
+    problem_.AddResidualBlock(
+        new AutoDiffCostFunction<F1, 1, 1, 1>(new F1), NULL, &x_[0], &x_[1]);
+    problem_.AddResidualBlock(
+        new AutoDiffCostFunction<F2, 1, 1, 1>(new F2), NULL, &x_[2], &x_[3]);
+    problem_.AddResidualBlock(
+        new AutoDiffCostFunction<F3, 1, 1, 1>(new F3), NULL, &x_[1], &x_[2]);
+    problem_.AddResidualBlock(
+        new AutoDiffCostFunction<F4, 1, 1, 1>(new F4), NULL, &x_[0], &x_[3]);
+
+    // Settings for the reference solution.
+    options_.linear_solver_type = ceres::DENSE_QR;
+    options_.max_num_iterations = 10;
+    options_.num_threads = 1;
+  }
+
+  Problem* mutable_problem() { return &problem_; }
+  Solver::Options* mutable_solver_options() { return &options_; }
+
+  static double kResidualTolerance;
+
+ private:
+  // Templated functions used for automatically differentiated cost
+  // functions.
+  class F1 {
+   public:
+    template <typename T> bool operator()(const T* const x1,
+                                          const T* const x2,
+                                          T* residual) const {
+      // f1 = x1 + 10 * x2;
+      *residual = *x1 + 10.0 * *x2;
+      return true;
+    }
+  };
+
+  class F2 {
+   public:
+    template <typename T> bool operator()(const T* const x3,
+                                          const T* const x4,
+                                          T* residual) const {
+      // f2 = sqrt(5) (x3 - x4)
+      *residual = sqrt(5.0) * (*x3 - *x4);
+      return true;
+    }
+  };
+
+  class F3 {
+   public:
+    template <typename T> bool operator()(const T* const x2,
+                                          const T* const x3,
+                                          T* residual) const {
+      // f3 = (x2 - 2 x3)^2
+      residual[0] = (x2[0] - 2.0 * x3[0]) * (x2[0] - 2.0 * x3[0]);
+      return true;
+    }
+  };
+
+  class F4 {
+   public:
+    template <typename T> bool operator()(const T* const x1,
+                                          const T* const x4,
+                                          T* residual) const {
+      // f4 = sqrt(10) (x1 - x4)^2
+      residual[0] = sqrt(10.0) * (x1[0] - x4[0]) * (x1[0] - x4[0]);
+      return true;
+    }
+  };
+
+  double x_[4];
+  Problem problem_;
+  Solver::Options options_;
+};
+
+double PowellsFunction::kResidualTolerance = 1e-8;
+
+typedef SystemTest<PowellsFunction> PowellTest;
+
+TEST_F(PowellTest, DenseQR) {
+  PowellsFunction powells_function;
+  Solver::Options* options = powells_function.mutable_solver_options();
+  options->linear_solver_type = DENSE_QR;
+  RunSolverForConfigAndExpectResidualsMatch(*options,
+                                            powells_function.mutable_problem());
+}
+
+TEST_F(PowellTest, DenseNormalCholesky) {
+  PowellsFunction powells_function;
+  Solver::Options* options = powells_function.mutable_solver_options();
+  options->linear_solver_type = DENSE_NORMAL_CHOLESKY;
+  RunSolverForConfigAndExpectResidualsMatch(*options,
+                                            powells_function.mutable_problem());
+}
+
+TEST_F(PowellTest, DenseSchur) {
+  PowellsFunction powells_function;
+  Solver::Options* options = powells_function.mutable_solver_options();
+  options->linear_solver_type = DENSE_SCHUR;
+  RunSolverForConfigAndExpectResidualsMatch(*options,
+                                            powells_function.mutable_problem());
+}
+
+TEST_F(PowellTest, IterativeSchurWithJacobi) {
+  PowellsFunction powells_function;
+  Solver::Options* options = powells_function.mutable_solver_options();
+  options->linear_solver_type = ITERATIVE_SCHUR;
+  options->sparse_linear_algebra_library_type = NO_SPARSE;
+  options->preconditioner_type = JACOBI;
+  RunSolverForConfigAndExpectResidualsMatch(*options,
+                                            powells_function.mutable_problem());
+}
+
+#ifndef CERES_NO_SUITESPARSE
+TEST_F(PowellTest, SparseNormalCholeskyUsingSuiteSparse) {
+  PowellsFunction powells_function;
+  Solver::Options* options = powells_function.mutable_solver_options();
+  options->linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+  options->sparse_linear_algebra_library_type = SUITE_SPARSE;
+  RunSolverForConfigAndExpectResidualsMatch(*options,
+                                            powells_function.mutable_problem());
+}
+#endif  // CERES_NO_SUITESPARSE
+
+#ifndef CERES_NO_CXSPARSE
+TEST_F(PowellTest, SparseNormalCholeskyUsingCXSparse) {
+  PowellsFunction powells_function;
+  Solver::Options* options = powells_function.mutable_solver_options();
+  options->linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+  options->sparse_linear_algebra_library_type = CX_SPARSE;
+  RunSolverForConfigAndExpectResidualsMatch(*options,
+                                            powells_function.mutable_problem());
+}
+#endif  // CERES_NO_CXSPARSE
+
+#ifndef CERES_NO_ACCELERATE_SPARSE
+TEST_F(PowellTest, SparseNormalCholeskyUsingAccelerateSparse) {
+  PowellsFunction powells_function;
+  Solver::Options* options = powells_function.mutable_solver_options();
+  options->linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+  options->sparse_linear_algebra_library_type = ACCELERATE_SPARSE;
+  RunSolverForConfigAndExpectResidualsMatch(*options,
+                                            powells_function.mutable_problem());
+}
+#endif  // CERES_NO_ACCELERATE_SPARSE
+
+#ifdef CERES_USE_EIGEN_SPARSE
+TEST_F(PowellTest, SparseNormalCholeskyUsingEigenSparse) {
+  PowellsFunction powells_function;
+  Solver::Options* options = powells_function.mutable_solver_options();
+  options->linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+  options->sparse_linear_algebra_library_type = EIGEN_SPARSE;
+  RunSolverForConfigAndExpectResidualsMatch(*options,
+                                            powells_function.mutable_problem());
+}
+#endif  // CERES_USE_EIGEN_SPARSE
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/test_util.cc b/internal/ceres/test_util.cc
new file mode 100644
index 0000000..5156856
--- /dev/null
+++ b/internal/ceres/test_util.cc
@@ -0,0 +1,144 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// Utility functions useful for testing.
+
+#include "ceres/test_util.h"
+
+#include <algorithm>
+#include <cmath>
+#include "ceres/file.h"
+#include "ceres/stringprintf.h"
+#include "ceres/types.h"
+#include "gflags/gflags.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+DECLARE_string(test_srcdir);
+
+// This macro is used to inject additional path information specific
+// to the build system.
+
+#ifndef CERES_TEST_SRCDIR_SUFFIX
+#define CERES_TEST_SRCDIR_SUFFIX ""
+#endif
+
+namespace ceres {
+namespace internal {
+
+bool ExpectClose(double x, double y, double max_abs_relative_difference) {
+  double absolute_difference = fabs(x - y);
+  double relative_difference = absolute_difference / std::max(fabs(x), fabs(y));
+  if (x == 0 || y == 0) {
+    // If x or y is exactly zero, then relative difference doesn't have any
+    // meaning. Take the absolute difference instead.
+    relative_difference = absolute_difference;
+  }
+  if (relative_difference > max_abs_relative_difference) {
+    VLOG(1) << StringPrintf("x=%17g y=%17g abs=%17g rel=%17g",
+                            x, y, absolute_difference, relative_difference);
+  }
+
+  EXPECT_NEAR(relative_difference, 0.0, max_abs_relative_difference);
+  return relative_difference <= max_abs_relative_difference;
+}
+
+void ExpectArraysCloseUptoScale(int n,
+                                const double* p,
+                                const double* q,
+                                double tol) {
+  CHECK_GT(n, 0);
+  CHECK(p);
+  CHECK(q);
+
+  double p_max = 0;
+  double q_max = 0;
+  int p_i = 0;
+  int q_i = 0;
+
+  for (int i = 0; i < n; ++i) {
+    if (std::abs(p[i]) > p_max) {
+      p_max = std::abs(p[i]);
+      p_i = i;
+    }
+    if (std::abs(q[i]) > q_max) {
+      q_max = std::abs(q[i]);
+      q_i = i;
+    }
+  }
+
+  // If both arrays are all zeros, they are equal up to scale, but
+  // for testing purposes, that's more likely to be an error than
+  // a desired result.
+  CHECK_NE(p_max, 0.0);
+  CHECK_NE(q_max, 0.0);
+
+  for (int i = 0; i < n; ++i) {
+    double p_norm = p[i] / p[p_i];
+    double q_norm = q[i] / q[q_i];
+
+    EXPECT_NEAR(p_norm, q_norm, tol) << "i=" << i;
+  }
+}
+
+void ExpectArraysClose(int n,
+                       const double* p,
+                       const double* q,
+                       double tol) {
+  CHECK_GT(n, 0);
+  CHECK(p);
+  CHECK(q);
+
+  for (int i = 0; i < n; ++i) {
+    EXPECT_TRUE(ExpectClose(p[i], q[i], tol))
+        << "p[" << i << "]" << p[i] << " "
+        << "q[" << i << "]" << q[i] << " "
+        << "tol: " << tol;
+  }
+}
+
+std::string TestFileAbsolutePath(const std::string& filename) {
+  return JoinPath(FLAGS_test_srcdir + CERES_TEST_SRCDIR_SUFFIX,
+                  filename);
+}
+
+std::string ToString(const Solver::Options& options) {
+  return StringPrintf(
+      "(%s, %s, %s, %s, %d)",
+      LinearSolverTypeToString(options.linear_solver_type),
+      SparseLinearAlgebraLibraryTypeToString(
+          options.sparse_linear_algebra_library_type),
+      options.linear_solver_ordering? "USER": "AUTOMATIC",
+      PreconditionerTypeToString(options.preconditioner_type),
+      options.num_threads);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/test_util.h b/internal/ceres/test_util.h
new file mode 100644
index 0000000..e43eb35
--- /dev/null
+++ b/internal/ceres/test_util.h
@@ -0,0 +1,129 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#ifndef CERES_INTERNAL_TEST_UTIL_H_
+#define CERES_INTERNAL_TEST_UTIL_H_
+
+#include <string>
+#include "ceres/internal/port.h"
+#include "ceres/problem.h"
+#include "ceres/solver.h"
+#include "ceres/stringprintf.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+// Expects that x and y have a relative difference of no more than
+// max_abs_relative_difference. If either x or y is zero, then the relative
+// difference is interpreted as an absolute difference.
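+//
+// For example, ExpectClose(1.0, 1.001, 1e-2) passes because the relative
+// difference |1.0 - 1.001| / 1.001 is roughly 1e-3, which is below the
+// 1e-2 bound.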
+bool ExpectClose(double x, double y, double max_abs_relative_difference);
+
+// Expects that for all i = 0, ..., n - 1
+//
+//   |p[i] - q[i]| / max(|p[i]|, |q[i]|) < tolerance
+void ExpectArraysClose(int n,
+                       const double* p,
+                       const double* q,
+                       double tolerance);
+
+// Expects that for all i = 0, ..., n - 1
+//
+//   |p[i] / max_norm_p - q[i] / max_norm_q| < tolerance
+//
+// where max_norm_p and max_norm_q are the max norms of the arrays p
+// and q respectively.
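+//
+// For example, p = {1, 2, 4} and q = {10, 20, 40} compare equal up to
+// scale, since both normalize to {0.25, 0.5, 1} when divided by their
+// largest-magnitude entry.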
+void ExpectArraysCloseUptoScale(int n,
+                                const double* p,
+                                const double* q,
+                                double tolerance);
+
+// Construct a fully qualified path for the test file depending on the
+// local build/testing environment.
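+//
+// For example (hypothetical filename), TestFileAbsolutePath("data.txt")
+// returns JoinPath(FLAGS_test_srcdir + CERES_TEST_SRCDIR_SUFFIX, "data.txt"),
+// i.e. the file resolved relative to the test source directory.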
+std::string TestFileAbsolutePath(const std::string& filename);
+
+std::string ToString(const Solver::Options& options);
+
+// A templated test fixture, that is used for testing Ceres end to end
+// by computing a solution to the problem for a given solver
+// configuration and comparing it to a reference solver configuration.
+//
+// It is assumed that the SystemTestProblem has a Solver::Options
+// struct that contains the reference Solver configuration.
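+//
+// A SystemTestProblem is expected to provide roughly the following
+// interface (sketch, modelled on PowellsFunction in system_test.cc):
+//
+//   class MySystemTestProblem {
+//    public:
+//     MySystemTestProblem();  // Builds the problem and reference options.
+//     Problem* mutable_problem();
+//     Solver::Options* mutable_solver_options();
+//     static double kResidualTolerance;
+//   };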
+template <typename SystemTestProblem>
+class SystemTest : public ::testing::Test {
+ protected:
+  virtual void SetUp() {
+    SystemTestProblem system_test_problem;
+    SolveAndEvaluateFinalResiduals(
+        *system_test_problem.mutable_solver_options(),
+        system_test_problem.mutable_problem(),
+        &expected_final_residuals_);
+  }
+
+  void RunSolverForConfigAndExpectResidualsMatch(const Solver::Options& options,
+                                                 Problem* problem) {
+    std::vector<double> final_residuals;
+    SolveAndEvaluateFinalResiduals(options, problem, &final_residuals);
+
+    // We compare solutions by comparing their residual vectors. We do
+    // not compare parameter vectors because it is much more brittle
+    // and error prone to do so, since the same problem can have
+    // nearly the same residuals at two completely different positions
+    // in parameter space.
+    CHECK_EQ(expected_final_residuals_.size(), final_residuals.size());
+    for (int i = 0; i < final_residuals.size(); ++i) {
+      EXPECT_NEAR(final_residuals[i],
+                  expected_final_residuals_[i],
+                  SystemTestProblem::kResidualTolerance)
+          << "Not close enough residual:" << i;
+    }
+  }
+
+  void SolveAndEvaluateFinalResiduals(const Solver::Options& options,
+                                      Problem* problem,
+                                      std::vector<double>* final_residuals) {
+    Solver::Summary summary;
+    Solve(options, problem, &summary);
+    CHECK_NE(summary.termination_type, ceres::FAILURE);
+    problem->Evaluate(Problem::EvaluateOptions(),
+                      nullptr,
+                      final_residuals,
+                      nullptr,
+                      nullptr);
+  }
+
+  std::vector<double> expected_final_residuals_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_TEST_UTIL_H_
diff --git a/internal/ceres/thread_pool.cc b/internal/ceres/thread_pool.cc
new file mode 100644
index 0000000..991da30
--- /dev/null
+++ b/internal/ceres/thread_pool.cc
@@ -0,0 +1,116 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vitus@google.com (Michael Vitus)
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifdef CERES_USE_CXX11_THREADS
+
+#include "ceres/thread_pool.h"
+
+#include <cmath>
+#include <limits>
+
+namespace ceres {
+namespace internal {
+namespace {
+
+// Constrain the total number of threads to the amount the hardware can support.
+int GetNumAllowedThreads(int requested_num_threads) {
+  return std::min(requested_num_threads, ThreadPool::MaxNumThreadsAvailable());
+}
+
+}  // namespace
+
+int ThreadPool::MaxNumThreadsAvailable() {
+  const int num_hardware_threads = std::thread::hardware_concurrency();
+  // hardware_concurrency() can return 0 if the value is not well defined or not
+  // computable.
+  return num_hardware_threads == 0
+      ? std::numeric_limits<int>::max()
+      : num_hardware_threads;
+}
+
+ThreadPool::ThreadPool() { }
+
+ThreadPool::ThreadPool(int num_threads) {
+  Resize(num_threads);
+}
+
+ThreadPool::~ThreadPool() {
+  std::lock_guard<std::mutex> lock(thread_pool_mutex_);
+  // Signal the thread workers to stop and wait for them to finish all scheduled
+  // tasks.
+  Stop();
+  for (std::thread& thread : thread_pool_) {
+    thread.join();
+  }
+}
+
+void ThreadPool::Resize(int num_threads) {
+  std::lock_guard<std::mutex> lock(thread_pool_mutex_);
+
+  const int num_current_threads = thread_pool_.size();
+  if (num_current_threads >= num_threads) {
+    return;
+  }
+
+  const int create_num_threads =
+      GetNumAllowedThreads(num_threads) - num_current_threads;
+
+  for (int i = 0; i < create_num_threads; ++i) {
+    thread_pool_.push_back(std::thread(&ThreadPool::ThreadMainLoop, this));
+  }
+}
+
+void ThreadPool::AddTask(const std::function<void()>& func) {
+  task_queue_.Push(func);
+}
+
+int ThreadPool::Size() {
+  std::lock_guard<std::mutex> lock(thread_pool_mutex_);
+  return thread_pool_.size();
+}
+
+void ThreadPool::ThreadMainLoop() {
+  std::function<void()> task;
+  while (task_queue_.Wait(&task)) {
+    task();
+  }
+}
+
+void ThreadPool::Stop() {
+  task_queue_.StopWaiters();
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif // CERES_USE_CXX11_THREADS
diff --git a/internal/ceres/thread_pool.h b/internal/ceres/thread_pool.h
new file mode 100644
index 0000000..1ebb52e
--- /dev/null
+++ b/internal/ceres/thread_pool.h
@@ -0,0 +1,120 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vitus@google.com (Michael Vitus)
+
+#ifndef CERES_INTERNAL_THREAD_POOL_H_
+#define CERES_INTERNAL_THREAD_POOL_H_
+
+#include <functional>
+#include <mutex>
+#include <thread>
+#include <vector>
+
+#include "ceres/concurrent_queue.h"
+
+namespace ceres {
+namespace internal {
+
+// A thread-safe thread pool with an unbounded task queue and a resizable number
+// of workers.  The size of the thread pool can be increased but never decreased
+// in order to support the largest number of threads requested.  The ThreadPool
+// has three states:
+//
+//  (1) The thread pool size is zero.  Tasks may be added to the thread pool via
+//  AddTask but they will not be executed until the thread pool is resized.
+//
+//  (2) The thread pool size is greater than zero.  Tasks may be added to the
+//  thread pool and will be executed as soon as a worker is available.  The
+//  thread pool may be resized while the thread pool is running.
+//
+//  (3) The thread pool is destructing.  The thread pool will signal all the
+//  workers to stop.  The workers will finish all of the tasks that have already
+//  been added to the thread pool.
+//
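+// A minimal usage sketch (hypothetical; assumes <atomic> is included and
+// that any state captured by the tasks outlives the pool):
+//
+//   ThreadPool pool(/*num_threads=*/4);
+//   std::atomic<int> counter(0);
+//   for (int i = 0; i < 8; ++i) {
+//     pool.AddTask([&counter]() { ++counter; });
+//   }
+//   // The destructor waits for all scheduled tasks to finish.
+//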
+class ThreadPool {
+ public:
+  // Returns the maximum number of hardware threads.
+  static int MaxNumThreadsAvailable();
+
+  // Default constructor with no active threads.  We allow instantiating a
+  // thread pool with no threads to support the use case of single-threaded
+  // Ceres, where everything will be executed on the main thread. For
+  // single-threaded execution this has two benefits: it avoids any overhead,
+  // as threads are expensive to create, and no unused threads show up in the
+  // debugger.
+  ThreadPool();
+
+  // Instantiates a thread pool with min(MaxNumThreadsAvailable, num_threads)
+  // number of threads.
+  explicit ThreadPool(int num_threads);
+
+  // Signals the workers to stop and waits for them to finish any tasks that
+  // have been scheduled.
+  ~ThreadPool();
+
+  // Resizes the thread pool if it is currently less than the requested number
+  // of threads.  The thread pool will be resized to min(MaxNumThreadsAvailable,
+  // num_threads) number of threads.  Resize does not support reducing the
+  // thread pool size.  If a smaller number of threads is requested, the thread
+  // pool remains the same size.  The thread pool is reused within Ceres with
+  // different number of threads, and we need to ensure we can support the
+  // largest number of threads requested.  It is safe to resize the thread pool
+  // while the workers are executing tasks, and the resizing is guaranteed to
+  // complete upon return.
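+  //
+  // For example, a pool constructed with 2 threads stays at size 2 after
+  // Resize(1), and grows to min(4, MaxNumThreadsAvailable()) after Resize(4).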
+  void Resize(int num_threads);
+
+  // Adds a task to the queue and wakes up a blocked thread.  If the thread pool
+  // size is greater than zero, then the task will be executed by a currently
+  // idle thread or when a thread becomes available.  If the thread pool has no
+  // threads, then the task will never be executed and the user should use
+  // Resize() to create a non-empty thread pool.
+  void AddTask(const std::function<void()>& func);
+
+  // Returns the current size of the thread pool.
+  int Size();
+
+ private:
+  // Main loop for the threads which blocks on the task queue until work becomes
+  // available.  It will return if and only if Stop has been called.
+  void ThreadMainLoop();
+
+  // Signal all the threads to stop.  It does not block until the threads are
+  // finished.
+  void Stop();
+
+  // The queue that stores the units of work available for the thread pool.  The
+  // task queue maintains its own thread safety.
+  ConcurrentQueue<std::function<void()>> task_queue_;
+  std::vector<std::thread> thread_pool_;
+  std::mutex thread_pool_mutex_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_THREAD_POOL_H_
diff --git a/internal/ceres/thread_pool_test.cc b/internal/ceres/thread_pool_test.cc
new file mode 100644
index 0000000..2b1bf87
--- /dev/null
+++ b/internal/ceres/thread_pool_test.cc
@@ -0,0 +1,200 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vitus@google.com (Michael Vitus)
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifdef CERES_USE_CXX11_THREADS
+
+#include "ceres/thread_pool.h"
+
+#include <chrono>
+#include <condition_variable>
+#include <mutex>
+#include <thread>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+// Adds a number of tasks to the thread pool and ensures they all run.
+TEST(ThreadPool, AddTask) {
+  int value = 0;
+  const int num_tasks = 100;
+  {
+    ThreadPool thread_pool(2);
+
+    std::condition_variable condition;
+    std::mutex mutex;
+
+    for (int i = 0; i < num_tasks; ++i) {
+      thread_pool.AddTask([&]() {
+          std::lock_guard<std::mutex> lock(mutex);
+          ++value;
+          condition.notify_all();
+        });
+    }
+
+    std::unique_lock<std::mutex> lock(mutex);
+    condition.wait(lock, [&](){return value == num_tasks;});
+  }
+
+  EXPECT_EQ(num_tasks, value);
+}
+
+// Adds a number of tasks to the queue and resizes the thread pool while the
+// threads are executing their work.
+TEST(ThreadPool, ResizingDuringExecution) {
+  int value = 0;
+
+  const int num_tasks = 100;
+
+  // Run this test in a scope so that the thread pool is deleted and all of
+  // the threads are stopped.
+  {
+    ThreadPool thread_pool(/*num_threads=*/2);
+
+    std::condition_variable condition;
+    std::mutex mutex;
+
+    // Acquire a lock on the mutex to prevent the threads from finishing their
+    // execution so we can test resizing the thread pool while the workers are
+    // executing a task.
+    std::unique_lock<std::mutex> lock(mutex);
+
+    // The same task for all of the workers to execute.
+    auto task = [&]() {
+      // This will block until the mutex is released inside the condition
+      // variable.
+      std::lock_guard<std::mutex> lock(mutex);
+      ++value;
+      condition.notify_all();
+    };
+
+    // Add the initial set of tasks to run.
+    for (int i = 0; i < num_tasks / 2; ++i) {
+      thread_pool.AddTask(task);
+    }
+
+    // Resize the thread pool while tasks are executing.
+    thread_pool.Resize(/*num_threads=*/3);
+
+    // Add more tasks to the thread pool to guarantee these are also completed.
+    for (int i = 0; i < num_tasks / 2; ++i) {
+      thread_pool.AddTask(task);
+    }
+
+    // Unlock the mutex to unblock all of the threads and wait until all of the
+    // tasks are completed.
+    condition.wait(lock, [&](){return value == num_tasks;});
+  }
+
+  EXPECT_EQ(num_tasks, value);
+}
+
+// Tests the destructor will wait until all running tasks are finished before
+// destructing the thread pool.
+TEST(ThreadPool, Destructor) {
+  // The test requires hardware support for more than one thread in order
+  // to pass; skip it otherwise.
+  const int num_hardware_threads = std::thread::hardware_concurrency();
+  if (num_hardware_threads <= 1) {
+    LOG(ERROR)
+        << "Test not supported, the hardware does not support threading.";
+    return;
+  }
+
+  std::condition_variable condition;
+  std::mutex mutex;
+  // Lock the mutex to ensure the tasks are blocked.
+  std::unique_lock<std::mutex> master_lock(mutex);
+  int value = 0;
+
+  // Create a thread that will instantiate and delete the thread pool.  This is
+  // required because we need to block on the thread pool being deleted and
+  // signal the tasks to finish.
+  std::thread thread([&]() {
+    ThreadPool thread_pool(/*num_threads=*/2);
+
+    for (int i = 0; i < 100; ++i) {
+      thread_pool.AddTask([&]() {
+        // This will block until the mutex is released inside the condition
+        // variable.
+        std::lock_guard<std::mutex> lock(mutex);
+        ++value;
+        condition.notify_all();
+      });
+    }
+    // The thread pool should be deleted.
+  });
+
+  // Give the spawned thread time to start the thread pool, add all of the
+  // tasks, and begin destroying the pool.
+  std::this_thread::sleep_for(std::chrono::milliseconds(500));
+
+  // Unlock the tasks.
+  master_lock.unlock();
+
+  // Wait for the thread to complete.
+  thread.join();
+
+  EXPECT_EQ(100, value);
+}
+
+TEST(ThreadPool, Resize) {
+  // The test requires hardware support for more than one thread in order
+  // to pass; skip it otherwise.
+  const int num_hardware_threads = std::thread::hardware_concurrency();
+  if (num_hardware_threads <= 1) {
+    LOG(ERROR)
+        << "Test not supported, the hardware does not support threading.";
+    return;
+  }
+
+  ThreadPool thread_pool(1);
+
+  EXPECT_EQ(1, thread_pool.Size());
+
+  thread_pool.Resize(2);
+
+  EXPECT_EQ(2, thread_pool.Size());
+
+  // Try reducing the thread pool size and verify it stays the same size.
+  thread_pool.Resize(1);
+  EXPECT_EQ(2, thread_pool.Size());
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif // CERES_USE_CXX11_THREADS
diff --git a/internal/ceres/thread_token_provider.cc b/internal/ceres/thread_token_provider.cc
new file mode 100644
index 0000000..337217b
--- /dev/null
+++ b/internal/ceres/thread_token_provider.cc
@@ -0,0 +1,76 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: yp@photonscore.de (Yury Prokazov)
+
+#include "ceres/thread_token_provider.h"
+
+#ifdef CERES_USE_OPENMP
+#include <omp.h>
+#endif
+
+namespace ceres {
+namespace internal {
+
+ThreadTokenProvider::ThreadTokenProvider(int num_threads) {
+  (void)num_threads;
+#ifdef CERES_USE_CXX11_THREADS
+  for (int i = 0; i < num_threads; i++) {
+    pool_.Push(i);
+  }
+#endif
+
+}
+
+int ThreadTokenProvider::Acquire() {
+#ifdef CERES_USE_OPENMP
+  return omp_get_thread_num();
+#endif
+
+#ifdef CERES_NO_THREADS
+  return 0;
+#endif
+
+#ifdef CERES_USE_CXX11_THREADS
+  int thread_id;
+  CHECK(pool_.Wait(&thread_id));
+  return thread_id;
+#endif
+
+}
+
+void ThreadTokenProvider::Release(int thread_id) {
+  (void)thread_id;
+#ifdef CERES_USE_CXX11_THREADS
+  pool_.Push(thread_id);
+#endif
+
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/thread_token_provider.h b/internal/ceres/thread_token_provider.h
new file mode 100644
index 0000000..f6298b7
--- /dev/null
+++ b/internal/ceres/thread_token_provider.h
@@ -0,0 +1,97 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: yp@photonscore.de (Yury Prokazov)
+
+#ifndef CERES_INTERNAL_THREAD_TOKEN_PROVIDER_H_
+#define CERES_INTERNAL_THREAD_TOKEN_PROVIDER_H_
+
+#include "ceres/internal/config.h"
+#include "ceres/internal/port.h"
+
+#ifdef CERES_USE_CXX11_THREADS
+#include "ceres/concurrent_queue.h"
+#endif
+
+namespace ceres {
+namespace internal {
+
+// Helper for C++11 thread number identification that is similar in
+// behaviour to omp_get_thread_num(). It provides a sequential thread id for
+// C++11 threading, which is used to access preallocated resources in the
+// parallelized parts of the code.  Tokens in the range 0 to num_threads - 1
+// can be acquired to identify a thread within a thread pool.
+//
+// If CERES_NO_THREADS is defined, Acquire() always returns 0 and Release()
+// takes no action.
+//
+// If CERES_USE_OPENMP is defined, Acquire() returns omp_get_thread_num()
+// and Release() takes no action.
+//
+// Example usage pseudocode:
+//
+// ThreadTokenProvider ttp(N); // allocate N tokens
+// Spawn N threads {
+//    int token = ttp.Acquire(); // get unique token
+//    ...
+//    ... use token to access resources bound to the thread
+//    ...
+//    ttp.Release(token); // return token to the pool
+//  }
+//
+class ThreadTokenProvider {
+ public:
+  ThreadTokenProvider(int num_threads);
+
+  // Returns the first token from the queue. The acquired value must be
+  // given back by Release().
+  int Acquire();
+
+  // Makes previously acquired token available for other threads.
+  void Release(int thread_id);
+
+ private:
+#ifdef CERES_USE_CXX11_THREADS
+  // This queue initially holds the sequence 0..num_threads-1. Each
+  // Acquire() call removes the first number from the queue. When the token
+  // is no longer needed it should be given back with a corresponding
+  // Release() call. This concurrent queue is more expensive than TBB's
+  // version, so you should not acquire the thread ID on every for loop
+  // iteration.
+  ConcurrentQueue<int> pool_;
+#endif
+
+  ThreadTokenProvider(ThreadTokenProvider&);
+  ThreadTokenProvider& operator=(ThreadTokenProvider&);
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_THREAD_TOKEN_PROVIDER_H_
diff --git a/internal/ceres/tiny_solver_autodiff_function_test.cc b/internal/ceres/tiny_solver_autodiff_function_test.cc
new file mode 100644
index 0000000..90033fc
--- /dev/null
+++ b/internal/ceres/tiny_solver_autodiff_function_test.cc
@@ -0,0 +1,151 @@
+
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: mierle@gmail.com (Keir Mierle)
+
+#include "ceres/tiny_solver_autodiff_function.h"
+#include "ceres/tiny_solver.h"
+#include "ceres/tiny_solver_test_util.h"
+
+#include <algorithm>
+#include <cmath>
+#include <limits>
+
+#include "gtest/gtest.h"
+
+namespace ceres {
+
+struct AutoDiffTestFunctor {
+  template<typename T>
+  bool operator()(const T* const parameters, T* residuals) const {
+    // Shift the parameters so the solution is not at the origin, to prevent
+    // accidentally showing "PASS".
+    const T& a = parameters[0] - T(1.0);
+    const T& b = parameters[1] - T(2.0);
+    const T& c = parameters[2] - T(3.0);
+    residuals[0] = 2.*a + 0.*b + 1.*c;
+    residuals[1] = 0.*a + 4.*b + 6.*c;
+    return true;
+  }
+};
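+
+// At the test point x = (2, 1, 4) used in the test below, the shifted
+// variables are a = 1, b = -1, c = 1, so the expected residuals are
+// 2*1 + 1*1 = 3 and 4*(-1) + 6*1 = 2, which is what the EXPECT_NEAR
+// checks assert.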
+
+// Leave a factor of 10 slop since these tests tend to mysteriously break on
+// other compilers or architectures if the tolerance is too tight.
+static double const kTolerance = std::numeric_limits<double>::epsilon() * 10;
+
+TEST(TinySolverAutoDiffFunction, SimpleFunction) {
+  typedef TinySolverAutoDiffFunction<AutoDiffTestFunctor, 2, 3>
+      AutoDiffTestFunction;
+  AutoDiffTestFunctor autodiff_test_functor;
+  AutoDiffTestFunction f(autodiff_test_functor);
+
+  Eigen::Vector3d x(2.0, 1.0, 4.0);
+  Eigen::Vector2d residuals;
+
+  // Check the case with cost-only evaluation.
+  residuals.setConstant(555);  // Arbitrary.
+  EXPECT_TRUE(f(&x(0), &residuals(0), nullptr));
+  EXPECT_NEAR(3.0, residuals(0), kTolerance);
+  EXPECT_NEAR(2.0, residuals(1), kTolerance);
+
+  // Check the case with cost and Jacobian evaluation.
+  Eigen::Matrix<double, 2, 3> jacobian;
+  residuals.setConstant(555);  // Arbitrary.
+  jacobian.setConstant(555);
+  EXPECT_TRUE(f(&x(0), &residuals(0), &jacobian(0, 0)));
+
+  // Verify cost.
+  EXPECT_NEAR(3.0, residuals(0), kTolerance);
+  EXPECT_NEAR(2.0, residuals(1), kTolerance);
+
+  // Verify Jacobian Row 1.
+  EXPECT_NEAR(2.0, jacobian(0, 0), kTolerance);
+  EXPECT_NEAR(0.0, jacobian(0, 1), kTolerance);
+  EXPECT_NEAR(1.0, jacobian(0, 2), kTolerance);
+
+  // Verify Jacobian row 2.
+  EXPECT_NEAR(0.0, jacobian(1, 0), kTolerance);
+  EXPECT_NEAR(4.0, jacobian(1, 1), kTolerance);
+  EXPECT_NEAR(6.0, jacobian(1, 2), kTolerance);
+}
+
+class DynamicResidualsFunctor {
+ public:
+  typedef double Scalar;
+  enum {
+    NUM_RESIDUALS = Eigen::Dynamic,
+    NUM_PARAMETERS = 3,
+  };
+
+  int NumResiduals() const {
+    return 2;
+  }
+
+  template<typename T>
+  bool operator()(const T* parameters, T* residuals) const {
+    // Jacobian is not evaluated by cost function, but by autodiff.
+    T* jacobian = nullptr;
+    return EvaluateResidualsAndJacobians(parameters, residuals, jacobian);
+  }
+};
+
+template<typename Function, typename Vector>
+void TestHelper(const Function& f, const Vector& x0) {
+  Vector x = x0;
+  Eigen::Vector2d residuals;
+  f(x.data(), residuals.data(), nullptr);
+  EXPECT_GT(residuals.squaredNorm() / 2.0, 1e-10);
+
+  TinySolver<Function> solver;
+  solver.Solve(f, &x);
+  EXPECT_NEAR(0.0, solver.summary.final_cost, 1e-10);
+}
+
+// A test case for when the number of residuals is
+// dynamically sized and we use autodiff
+TEST(TinySolverAutoDiffFunction, ResidualsDynamicAutoDiff) {
+  Eigen::Vector3d x0(0.76026643, -30.01799744, 0.55192142);
+
+  DynamicResidualsFunctor f;
+  using AutoDiffCostFunctor =
+      ceres::TinySolverAutoDiffFunction<DynamicResidualsFunctor,
+                                        Eigen::Dynamic,
+                                        3>;
+  AutoDiffCostFunctor f_autodiff(f);
+
+  Eigen::Vector2d residuals;
+  f_autodiff(x0.data(), residuals.data(), nullptr);
+  EXPECT_GT(residuals.squaredNorm() / 2.0, 1e-10);
+
+  TinySolver<AutoDiffCostFunctor> solver;
+  solver.Solve(f_autodiff, &x0);
+  EXPECT_NEAR(0.0, solver.summary.final_cost, 1e-10);
+}
+
+}  // namespace ceres
diff --git a/internal/ceres/tiny_solver_cost_function_adapter_test.cc b/internal/ceres/tiny_solver_cost_function_adapter_test.cc
new file mode 100644
index 0000000..620df41
--- /dev/null
+++ b/internal/ceres/tiny_solver_cost_function_adapter_test.cc
@@ -0,0 +1,132 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/tiny_solver_cost_function_adapter.h"
+
+#include <algorithm>
+#include <cmath>
+#include <memory>
+
+#include "ceres/cost_function.h"
+#include "ceres/sized_cost_function.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+
+class CostFunction2x3 : public SizedCostFunction<2,3> {
+  virtual bool Evaluate(double const* const* parameters,
+                        double* residuals,
+                        double** jacobians) const {
+    double x = parameters[0][0];
+    double y = parameters[0][1];
+    double z = parameters[0][2];
+
+    residuals[0] = x + 2*y + 4*z;
+    residuals[1] = y * z;
+
+    if (jacobians && jacobians[0]) {
+      jacobians[0][0] = 1;
+      jacobians[0][1] = 2;
+      jacobians[0][2] = 4;
+
+      jacobians[0][3 + 0] = 0;
+      jacobians[0][3 + 1] = z;
+      jacobians[0][3 + 2] = y;
+    }
+
+    return true;
+  }
+};
+
+template<int kNumResiduals, int kNumParameters>
+void TestHelper() {
+  std::unique_ptr<CostFunction> cost_function(new CostFunction2x3);
+  typedef  TinySolverCostFunctionAdapter<kNumResiduals, kNumParameters> CostFunctionAdapter;
+  CostFunctionAdapter cfa(*cost_function);
+  EXPECT_EQ(CostFunctionAdapter::NUM_RESIDUALS, kNumResiduals);
+  EXPECT_EQ(CostFunctionAdapter::NUM_PARAMETERS, kNumParameters);
+
+  EXPECT_EQ(cfa.NumResiduals(), 2);
+  EXPECT_EQ(cfa.NumParameters(), 3);
+
+  Eigen::Matrix<double, 2, 1> actual_residuals, expected_residuals;
+  Eigen::Matrix<double, 2, 3, Eigen::ColMajor> actual_jacobian;
+  Eigen::Matrix<double, 2, 3, Eigen::RowMajor> expected_jacobian;
+
+  double xyz[3] = { 1.0, -1.0, 2.0};
+  double* parameters[1] = {xyz};
+
+  // Check that residual only evaluation works.
+  cost_function->Evaluate(parameters, expected_residuals.data(), NULL);
+  cfa(xyz, actual_residuals.data(), NULL);
+  EXPECT_NEAR(
+      (expected_residuals - actual_residuals).norm() / actual_residuals.norm(),
+      0.0,
+      std::numeric_limits<double>::epsilon())
+      << "\nExpected residuals: " << expected_residuals.transpose()
+      << "\nActual residuals: " << actual_residuals.transpose();
+
+  // Check that residual and jacobian evaluation works.
+  double* jacobians[1] = {expected_jacobian.data()};
+  cost_function->Evaluate(parameters, expected_residuals.data(), jacobians);
+  cfa(xyz, actual_residuals.data(), actual_jacobian.data());
+
+  EXPECT_NEAR(
+      (expected_residuals - actual_residuals).norm() / actual_residuals.norm(),
+      0.0,
+      std::numeric_limits<double>::epsilon())
+      << "\nExpected residuals: " << expected_residuals.transpose()
+      << "\nActual residuals: " << actual_residuals.transpose();
+
+  EXPECT_NEAR(
+      (expected_jacobian - actual_jacobian).norm() / actual_jacobian.norm(),
+      0.0,
+      std::numeric_limits<double>::epsilon())
+      << "\nExpected jacobian: " << expected_jacobian.transpose()
+      << "\nActual jacobian: " << actual_jacobian.transpose();
+}
+
+TEST(TinySolverCostFunctionAdapter, StaticResidualsStaticParameterBlock) {
+  TestHelper<2, 3>();
+}
+
+TEST(TinySolverCostFunctionAdapter, DynamicResidualsStaticParameterBlock) {
+  TestHelper<Eigen::Dynamic, 3>();
+}
+
+TEST(TinySolverCostFunctionAdapter, StaticResidualsDynamicParameterBlock) {
+  TestHelper<2, Eigen::Dynamic>();
+}
+
+TEST(TinySolverCostFunctionAdapter, DynamicResidualsDynamicParameterBlock) {
+  TestHelper<Eigen::Dynamic, Eigen::Dynamic>();
+}
+
+}  // namespace ceres
diff --git a/internal/ceres/tiny_solver_test.cc b/internal/ceres/tiny_solver_test.cc
new file mode 100644
index 0000000..2a8cd39
--- /dev/null
+++ b/internal/ceres/tiny_solver_test.cc
@@ -0,0 +1,172 @@
+
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: mierle@gmail.com (Keir Mierle)
+
+#include "ceres/tiny_solver.h"
+#include "ceres/tiny_solver_test_util.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include "gtest/gtest.h"
+
+namespace ceres {
+
+typedef Eigen::Matrix<double, 2, 1> Vec2;
+typedef Eigen::Matrix<double, 3, 1> Vec3;
+typedef Eigen::VectorXd VecX;
+
+class ExampleStatic {
+ public:
+  typedef double Scalar;
+  enum {
+    // Can also be Eigen::Dynamic.
+    NUM_RESIDUALS = 2,
+    NUM_PARAMETERS = 3,
+  };
+  bool operator()(const double* parameters,
+                  double* residuals,
+                  double* jacobian) const {
+    return EvaluateResidualsAndJacobians(parameters, residuals, jacobian);
+  }
+};
+
+class ExampleParametersDynamic {
+ public:
+  typedef double Scalar;
+  enum {
+    NUM_RESIDUALS = 2,
+    NUM_PARAMETERS = Eigen::Dynamic,
+  };
+
+  int NumParameters() const {
+    return 3;
+  }
+
+  bool operator()(const double* parameters,
+                  double* residuals,
+                  double* jacobian) const {
+    return EvaluateResidualsAndJacobians(parameters, residuals, jacobian);
+  }
+};
+
+class ExampleResidualsDynamic {
+ public:
+  typedef double Scalar;
+  enum {
+    NUM_RESIDUALS = Eigen::Dynamic,
+    NUM_PARAMETERS = 3,
+  };
+
+  int NumResiduals() const {
+    return 2;
+  }
+
+  bool operator()(const double* parameters,
+                  double* residuals,
+                  double* jacobian) const {
+    return EvaluateResidualsAndJacobians(parameters, residuals, jacobian);
+  }
+};
+
+class ExampleAllDynamic {
+ public:
+  typedef double Scalar;
+  enum {
+    NUM_RESIDUALS = Eigen::Dynamic,
+    NUM_PARAMETERS = Eigen::Dynamic,
+  };
+
+  int NumResiduals() const {
+    return 2;
+  }
+
+  int NumParameters() const {
+    return 3;
+  }
+
+  bool operator()(const double* parameters,
+                  double* residuals,
+                  double* jacobian) const {
+    return EvaluateResidualsAndJacobians(parameters, residuals, jacobian);
+  }
+};
+
+template<typename Function, typename Vector>
+void TestHelper(const Function& f, const Vector& x0) {
+  Vector x = x0;
+  Vec2 residuals;
+  f(x.data(), residuals.data(), NULL);
+  EXPECT_GT(residuals.squaredNorm() / 2.0, 1e-10);
+
+  TinySolver<Function> solver;
+  solver.Solve(f, &x);
+  EXPECT_NEAR(0.0, solver.summary.final_cost, 1e-10);
+}
+
+// A test case for when the cost function is statically sized.
+TEST(TinySolver, SimpleExample) {
+  Vec3 x0(0.76026643, -30.01799744, 0.55192142);
+  ExampleStatic f;
+
+  TestHelper(f, x0);
+}
+
+// A test case for when the number of parameters is dynamically sized.
+TEST(TinySolver, ParametersDynamic) {
+  VecX x0(3);
+  x0 << 0.76026643, -30.01799744, 0.55192142;
+
+  ExampleParametersDynamic f;
+
+  TestHelper(f, x0);
+}
+
+// A test case for when the number of residuals is dynamically sized.
+TEST(TinySolver, ResidualsDynamic) {
+  Vec3 x0(0.76026643, -30.01799744, 0.55192142);
+
+  ExampleResidualsDynamic f;
+
+  TestHelper(f, x0);
+}
+
+// A test case for when the number of parameters and residuals is
+// dynamically sized.
+TEST(TinySolver, ParametersAndResidualsDynamic) {
+  VecX x0(3);
+  x0 << 0.76026643, -30.01799744, 0.55192142;
+
+  ExampleAllDynamic f;
+
+  TestHelper(f, x0);
+}
+
+}  // namespace ceres
diff --git a/internal/ceres/tiny_solver_test_util.h b/internal/ceres/tiny_solver_test_util.h
new file mode 100644
index 0000000..48fe955
--- /dev/null
+++ b/internal/ceres/tiny_solver_test_util.h
@@ -0,0 +1,63 @@
+
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: mierle@gmail.com (Keir Mierle)
+
+#ifndef CERES_INTERNAL_TINY_SOLVER_TEST_UTIL_H_
+#define CERES_INTERNAL_TINY_SOLVER_TEST_UTIL_H_
+
+namespace ceres {
+
+template<typename T>
+bool EvaluateResidualsAndJacobians(const T* parameters,
+                                   T* residuals,
+                                   T* jacobian) {
+  T x = parameters[0];
+  T y = parameters[1];
+  T z = parameters[2];
+
+  residuals[0] = x + static_cast<T>(2) * y + static_cast<T>(4) * z;
+  residuals[1] = y * z;
+
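+  // The 2x3 Jacobian of the residuals with respect to (x, y, z) is
+  // written in column-major order, i.e. jacobian[col * 2 + row].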
+  if (jacobian) {
+    jacobian[0 * 2 + 0] = static_cast<T>(1);
+    jacobian[0 * 2 + 1] = static_cast<T>(0);
+
+    jacobian[1 * 2 + 0] = static_cast<T>(2);
+    jacobian[1 * 2 + 1] = z;
+
+    jacobian[2 * 2 + 0] = static_cast<T>(4);
+    jacobian[2 * 2 + 1] = y;
+  }
+  return true;
+}
+
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_TINY_SOLVER_TEST_UTIL_H_
diff --git a/internal/ceres/triplet_sparse_matrix.cc b/internal/ceres/triplet_sparse_matrix.cc
new file mode 100644
index 0000000..15b9674
--- /dev/null
+++ b/internal/ceres/triplet_sparse_matrix.cc
@@ -0,0 +1,306 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/triplet_sparse_matrix.h"
+
+#include <algorithm>
+#include <cstddef>
+
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/port.h"
+#include "ceres/random.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+TripletSparseMatrix::TripletSparseMatrix()
+    : num_rows_(0),
+      num_cols_(0),
+      max_num_nonzeros_(0),
+      num_nonzeros_(0) {}
+
+
+TripletSparseMatrix::~TripletSparseMatrix() {}
+
+TripletSparseMatrix::TripletSparseMatrix(int num_rows,
+                                         int num_cols,
+                                         int max_num_nonzeros)
+    : num_rows_(num_rows),
+      num_cols_(num_cols),
+      max_num_nonzeros_(max_num_nonzeros),
+      num_nonzeros_(0) {
+  // All the sizes should be at least zero.
+  CHECK_GE(num_rows, 0);
+  CHECK_GE(num_cols, 0);
+  CHECK_GE(max_num_nonzeros, 0);
+  AllocateMemory();
+}
+
+TripletSparseMatrix::TripletSparseMatrix(const int num_rows,
+                                         const int num_cols,
+                                         const std::vector<int>& rows,
+                                         const std::vector<int>& cols,
+                                         const std::vector<double>& values)
+    : num_rows_(num_rows),
+      num_cols_(num_cols),
+      max_num_nonzeros_(values.size()),
+      num_nonzeros_(values.size()) {
+  // All the sizes should be at least zero.
+  CHECK_GE(num_rows, 0);
+  CHECK_GE(num_cols, 0);
+  CHECK_EQ(rows.size(), cols.size());
+  CHECK_EQ(rows.size(), values.size());
+  AllocateMemory();
+  std::copy(rows.begin(), rows.end(), rows_.get());
+  std::copy(cols.begin(), cols.end(), cols_.get());
+  std::copy(values.begin(), values.end(), values_.get());
+}
+
+TripletSparseMatrix::TripletSparseMatrix(const TripletSparseMatrix& orig)
+    : SparseMatrix(),
+      num_rows_(orig.num_rows_),
+      num_cols_(orig.num_cols_),
+      max_num_nonzeros_(orig.max_num_nonzeros_),
+      num_nonzeros_(orig.num_nonzeros_) {
+  AllocateMemory();
+  CopyData(orig);
+}
+
+TripletSparseMatrix& TripletSparseMatrix::operator=(
+    const TripletSparseMatrix& rhs) {
+  num_rows_ = rhs.num_rows_;
+  num_cols_ = rhs.num_cols_;
+  num_nonzeros_ = rhs.num_nonzeros_;
+  max_num_nonzeros_ = rhs.max_num_nonzeros_;
+  AllocateMemory();
+  CopyData(rhs);
+  return *this;
+}
+
+bool TripletSparseMatrix::AllTripletsWithinBounds() const {
+  for (int i = 0; i < num_nonzeros_; ++i) {
+    if ((rows_[i] < 0) || (rows_[i] >= num_rows_) ||
+        (cols_[i] < 0) || (cols_[i] >= num_cols_))
+      return false;
+  }
+  return true;
+}
+
+void TripletSparseMatrix::Reserve(int new_max_num_nonzeros) {
+  CHECK_LE(num_nonzeros_, new_max_num_nonzeros)
+      << "Reallocation will cause data loss";
+
+  // Nothing to do if we have enough space already.
+  if (new_max_num_nonzeros <= max_num_nonzeros_)
+    return;
+
+  int* new_rows = new int[new_max_num_nonzeros];
+  int* new_cols = new int[new_max_num_nonzeros];
+  double* new_values = new double[new_max_num_nonzeros];
+
+  for (int i = 0; i < num_nonzeros_; ++i) {
+    new_rows[i] = rows_[i];
+    new_cols[i] = cols_[i];
+    new_values[i] = values_[i];
+  }
+
+  rows_.reset(new_rows);
+  cols_.reset(new_cols);
+  values_.reset(new_values);
+
+  max_num_nonzeros_ = new_max_num_nonzeros;
+}
+
+void TripletSparseMatrix::SetZero() {
+  std::fill(values_.get(), values_.get() + max_num_nonzeros_, 0.0);
+  num_nonzeros_ = 0;
+}
+
+void TripletSparseMatrix::set_num_nonzeros(int num_nonzeros) {
+  CHECK_GE(num_nonzeros, 0);
+  CHECK_LE(num_nonzeros, max_num_nonzeros_);
+  num_nonzeros_ = num_nonzeros;
+}
+
+void TripletSparseMatrix::AllocateMemory() {
+  rows_.reset(new int[max_num_nonzeros_]);
+  cols_.reset(new int[max_num_nonzeros_]);
+  values_.reset(new double[max_num_nonzeros_]);
+}
+
+void TripletSparseMatrix::CopyData(const TripletSparseMatrix& orig) {
+  for (int i = 0; i < num_nonzeros_; ++i) {
+    rows_[i] = orig.rows_[i];
+    cols_[i] = orig.cols_[i];
+    values_[i] = orig.values_[i];
+  }
+}
+
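+// y += A * x. The result is accumulated into y, which the caller is
+// expected to have initialized.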
+void TripletSparseMatrix::RightMultiply(const double* x, double* y) const {
+  for (int i = 0; i < num_nonzeros_; ++i) {
+    y[rows_[i]] += values_[i]*x[cols_[i]];
+  }
+}
+
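+// y += A' * x. As with RightMultiply, the result is accumulated into y.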
+void TripletSparseMatrix::LeftMultiply(const double* x, double* y) const {
+  for (int i = 0; i < num_nonzeros_; ++i) {
+    y[cols_[i]] += values_[i]*x[rows_[i]];
+  }
+}
+
+void TripletSparseMatrix::SquaredColumnNorm(double* x) const {
+  CHECK(x != nullptr);
+  VectorRef(x, num_cols_).setZero();
+  for (int i = 0; i < num_nonzeros_; ++i) {
+    x[cols_[i]] += values_[i] * values_[i];
+  }
+}
+
+void TripletSparseMatrix::ScaleColumns(const double* scale) {
+  CHECK(scale != nullptr);
+  for (int i = 0; i < num_nonzeros_; ++i) {
+    values_[i] = values_[i] * scale[cols_[i]];
+  }
+}
+
+void TripletSparseMatrix::ToDenseMatrix(Matrix* dense_matrix) const {
+  dense_matrix->resize(num_rows_, num_cols_);
+  dense_matrix->setZero();
+  Matrix& m = *dense_matrix;
+  for (int i = 0; i < num_nonzeros_; ++i) {
+    m(rows_[i], cols_[i]) += values_[i];
+  }
+}
+
+void TripletSparseMatrix::AppendRows(const TripletSparseMatrix& B) {
+  CHECK_EQ(B.num_cols(), num_cols_);
+  Reserve(num_nonzeros_ + B.num_nonzeros_);
+  for (int i = 0; i < B.num_nonzeros_; ++i) {
+    rows_.get()[num_nonzeros_] = B.rows()[i] + num_rows_;
+    cols_.get()[num_nonzeros_] = B.cols()[i];
+    values_.get()[num_nonzeros_++] = B.values()[i];
+  }
+  num_rows_ = num_rows_ + B.num_rows();
+}
+
+void TripletSparseMatrix::AppendCols(const TripletSparseMatrix& B) {
+  CHECK_EQ(B.num_rows(), num_rows_);
+  Reserve(num_nonzeros_ + B.num_nonzeros_);
+  for (int i = 0; i < B.num_nonzeros_; ++i, ++num_nonzeros_) {
+    rows_.get()[num_nonzeros_] = B.rows()[i];
+    cols_.get()[num_nonzeros_] = B.cols()[i] + num_cols_;
+    values_.get()[num_nonzeros_] = B.values()[i];
+  }
+  num_cols_ = num_cols_ + B.num_cols();
+}
+
+
+void TripletSparseMatrix::Resize(int new_num_rows, int new_num_cols) {
+  if ((new_num_rows >= num_rows_) && (new_num_cols >= num_cols_)) {
+    num_rows_  = new_num_rows;
+    num_cols_ = new_num_cols;
+    return;
+  }
+
+  num_rows_ = new_num_rows;
+  num_cols_ = new_num_cols;
+
+  int* r_ptr = rows_.get();
+  int* c_ptr = cols_.get();
+  double* v_ptr = values_.get();
+
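+  // Walk the triplets in place: entries falling outside the new bounds
+  // are dropped and the surviving entries are shifted left to close the
+  // resulting gaps.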
+  int dropped_terms = 0;
+  for (int i = 0; i < num_nonzeros_; ++i) {
+    if ((r_ptr[i] < num_rows_) && (c_ptr[i] < num_cols_)) {
+      if (dropped_terms) {
+        r_ptr[i-dropped_terms] = r_ptr[i];
+        c_ptr[i-dropped_terms] = c_ptr[i];
+        v_ptr[i-dropped_terms] = v_ptr[i];
+      }
+    } else {
+      ++dropped_terms;
+    }
+  }
+  num_nonzeros_ -= dropped_terms;
+}
+
+TripletSparseMatrix* TripletSparseMatrix::CreateSparseDiagonalMatrix(
+    const double* values, int num_rows) {
+  TripletSparseMatrix* m =
+      new TripletSparseMatrix(num_rows, num_rows, num_rows);
+  for (int i = 0; i < num_rows; ++i) {
+    m->mutable_rows()[i] = i;
+    m->mutable_cols()[i] = i;
+    m->mutable_values()[i] = values[i];
+  }
+  m->set_num_nonzeros(num_rows);
+  return m;
+}
+
+void TripletSparseMatrix::ToTextFile(FILE* file) const {
+  CHECK(file != nullptr);
+  for (int i = 0; i < num_nonzeros_; ++i) {
+    fprintf(file, "% 10d % 10d %17f\n", rows_[i], cols_[i], values_[i]);
+  }
+}
+
+TripletSparseMatrix* TripletSparseMatrix::CreateRandomMatrix(
+    const TripletSparseMatrix::RandomMatrixOptions& options) {
+  CHECK_GT(options.num_rows, 0);
+  CHECK_GT(options.num_cols, 0);
+  CHECK_GT(options.density, 0.0);
+  CHECK_LE(options.density, 1.0);
+
+  std::vector<int> rows;
+  std::vector<int> cols;
+  std::vector<double> values;
+  while (rows.empty()) {
+    rows.clear();
+    cols.clear();
+    values.clear();
+    for (int r = 0; r < options.num_rows; ++r) {
+      for (int c = 0; c < options.num_cols; ++c) {
+        if (RandDouble() <= options.density) {
+          rows.push_back(r);
+          cols.push_back(c);
+          values.push_back(RandNormal());
+        }
+      }
+    }
+  }
+
+  return new TripletSparseMatrix(
+      options.num_rows, options.num_cols, rows, cols, values);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/triplet_sparse_matrix.h b/internal/ceres/triplet_sparse_matrix.h
new file mode 100644
index 0000000..606f8e8
--- /dev/null
+++ b/internal/ceres/triplet_sparse_matrix.h
@@ -0,0 +1,155 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_TRIPLET_SPARSE_MATRIX_H_
+#define CERES_INTERNAL_TRIPLET_SPARSE_MATRIX_H_
+
+#include <memory>
+#include <vector>
+#include "ceres/sparse_matrix.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+// An implementation of the SparseMatrix interface to store and
+// manipulate sparse matrices in triplet (i,j,s) form.  This object is
+// inspired by the design of the cholmod_triplet struct used in the
+// SuiteSparse package and is memory layout compatible with it.
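+//
+// For example, the 2x3 matrix
+//
+//   [1 0 2]
+//   [0 3 0]
+//
+// can be stored (in any order) as
+//
+//   rows   = {0, 0, 1}
+//   cols   = {0, 2, 1}
+//   values = {1.0, 2.0, 3.0}
+//
+// with num_nonzeros = 3.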
+class TripletSparseMatrix : public SparseMatrix {
+ public:
+  TripletSparseMatrix();
+  TripletSparseMatrix(int num_rows, int num_cols, int max_num_nonzeros);
+  TripletSparseMatrix(int num_rows,
+                      int num_cols,
+                      const std::vector<int>& rows,
+                      const std::vector<int>& cols,
+                      const std::vector<double>& values);
+
+  explicit TripletSparseMatrix(const TripletSparseMatrix& orig);
+
+  TripletSparseMatrix& operator=(const TripletSparseMatrix& rhs);
+
+  ~TripletSparseMatrix();
+
+  // Implementation of the SparseMatrix interface.
+  virtual void SetZero();
+  virtual void RightMultiply(const double* x, double* y) const;
+  virtual void LeftMultiply(const double* x, double* y) const;
+  virtual void SquaredColumnNorm(double* x) const;
+  virtual void ScaleColumns(const double* scale);
+  virtual void ToDenseMatrix(Matrix* dense_matrix) const;
+  virtual void ToTextFile(FILE* file) const;
+  virtual int num_rows()        const { return num_rows_;     }
+  virtual int num_cols()        const { return num_cols_;     }
+  virtual int num_nonzeros()    const { return num_nonzeros_; }
+  virtual const double* values()  const { return values_.get(); }
+  virtual double* mutable_values()      { return values_.get(); }
+  virtual void set_num_nonzeros(int num_nonzeros);
+
+  // Increase max_num_nonzeros and correspondingly increase the size
+  // of rows_, cols_ and values_. If new_max_num_nonzeros is smaller
+  // than max_num_nonzeros_, then num_non_zeros should be less than or
+  // equal to new_max_num_nonzeros, otherwise data loss is possible
+  // and the method crashes.
+  void Reserve(int new_max_num_nonzeros);
+
+  // Append the matrix B at the bottom of this matrix. B should have
+  // the same number of columns as num_cols_.
+  void AppendRows(const TripletSparseMatrix& B);
+
+  // Append the matrix B at the right of this matrix. B should have
+  // the same number of rows as num_rows_.
+  void AppendCols(const TripletSparseMatrix& B);
+
+  // Resize the matrix. Entries which fall outside the new matrix
+  // bounds are dropped and the num_non_zeros changed accordingly.
+  void Resize(int new_num_rows, int new_num_cols);
+
+  int max_num_nonzeros() const { return max_num_nonzeros_; }
+  const int* rows()      const { return rows_.get();       }
+  const int* cols()      const { return cols_.get();       }
+  int* mutable_rows()          { return rows_.get();       }
+  int* mutable_cols()          { return cols_.get();       }
+
+  // Returns true if the entries of the matrix obey the row and column
+  // size bounds and false otherwise.
+  bool AllTripletsWithinBounds() const;
+
+  bool IsValid() const { return AllTripletsWithinBounds(); }
+
+  // Build a sparse diagonal matrix of size num_rows x num_rows from
+  // the array values. Entries of the values array are copied into the
+  // sparse matrix.
+  static TripletSparseMatrix* CreateSparseDiagonalMatrix(const double* values,
+                                                         int num_rows);
+
+  // Options struct to control the generation of random
+  // TripletSparseMatrix objects.
+  struct RandomMatrixOptions {
+    int num_rows;
+    int num_cols;
+    // 0 < density <= 1 is the probability of an entry being
+    // structurally non-zero. A given random matrix will not have
+    // precisely this density.
+    double density;
+  };
+
+  // Create a random TripletSparseMatrix whose entries are
+  // normally distributed and whose structure is determined by
+  // RandomMatrixOptions.
+  //
+  // Caller owns the result.
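+  //
+  // Illustrative usage sketch (not normative documentation):
+  //
+  //   TripletSparseMatrix::RandomMatrixOptions options;
+  //   options.num_rows = 4;
+  //   options.num_cols = 3;
+  //   options.density = 0.5;
+  //   std::unique_ptr<TripletSparseMatrix> random(
+  //       TripletSparseMatrix::CreateRandomMatrix(options));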
+  static TripletSparseMatrix* CreateRandomMatrix(
+      const TripletSparseMatrix::RandomMatrixOptions& options);
+
+ private:
+  void AllocateMemory();
+  void CopyData(const TripletSparseMatrix& orig);
+
+  int num_rows_;
+  int num_cols_;
+  int max_num_nonzeros_;
+  int num_nonzeros_;
+
+  // The data is stored as three arrays. For each i, values_[i] is
+  // stored at the location (rows_[i], cols_[i]). If there are
+  // multiple entries with the same (rows_[i], cols_[i]), the values_
+  // entries corresponding to them are summed up.
+  std::unique_ptr<int[]> rows_;
+  std::unique_ptr<int[]> cols_;
+  std::unique_ptr<double[]> values_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_TRIPLET_SPARSE_MATRIX_H_
diff --git a/internal/ceres/triplet_sparse_matrix_test.cc b/internal/ceres/triplet_sparse_matrix_test.cc
new file mode 100644
index 0000000..d71df7b
--- /dev/null
+++ b/internal/ceres/triplet_sparse_matrix_test.cc
@@ -0,0 +1,319 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/triplet_sparse_matrix.h"
+
+#include <memory>
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+TEST(TripletSparseMatrix, DefaultConstructorReturnsEmptyObject) {
+  TripletSparseMatrix m;
+  EXPECT_EQ(m.num_rows(), 0);
+  EXPECT_EQ(m.num_cols(), 0);
+  EXPECT_EQ(m.num_nonzeros(), 0);
+  EXPECT_EQ(m.max_num_nonzeros(), 0);
+}
+
+TEST(TripletSparseMatrix, SimpleConstructorAndBasicOperations) {
+  // Build a matrix
+  TripletSparseMatrix m(2, 5, 4);
+  EXPECT_EQ(m.num_rows(), 2);
+  EXPECT_EQ(m.num_cols(), 5);
+  EXPECT_EQ(m.num_nonzeros(), 0);
+  EXPECT_EQ(m.max_num_nonzeros(), 4);
+
+  m.mutable_rows()[0] = 0;
+  m.mutable_cols()[0] = 1;
+  m.mutable_values()[0] = 2.5;
+
+  m.mutable_rows()[1] = 1;
+  m.mutable_cols()[1] = 4;
+  m.mutable_values()[1] = 5.2;
+  m.set_num_nonzeros(2);
+
+  EXPECT_EQ(m.num_nonzeros(), 2);
+
+  ASSERT_TRUE(m.AllTripletsWithinBounds());
+
+  // We should never be able to resize and lose data.
+  EXPECT_DEATH_IF_SUPPORTED(m.Reserve(1), "Reallocation will cause data loss");
+
+  // We should be able to resize while preserving data
+  m.Reserve(50);
+  EXPECT_EQ(m.max_num_nonzeros(), 50);
+
+  m.Reserve(3);
+  EXPECT_EQ(m.max_num_nonzeros(), 50);  // The space is already reserved.
+
+  EXPECT_EQ(m.rows()[0], 0);
+  EXPECT_EQ(m.rows()[1], 1);
+
+  EXPECT_EQ(m.cols()[0], 1);
+  EXPECT_EQ(m.cols()[1], 4);
+
+  EXPECT_DOUBLE_EQ(m.values()[0], 2.5);
+  EXPECT_DOUBLE_EQ(m.values()[1], 5.2);
+
+  // Bounds check should fail
+  m.mutable_rows()[0] = 10;
+  EXPECT_FALSE(m.AllTripletsWithinBounds());
+
+  m.mutable_rows()[0] = 1;
+  m.mutable_cols()[0] = 100;
+  EXPECT_FALSE(m.AllTripletsWithinBounds());
+
+  // Remove all data and then resize the data store
+  m.SetZero();
+  EXPECT_EQ(m.num_nonzeros(), 0);
+  m.Reserve(1);
+}
+
+TEST(TripletSparseMatrix, CopyConstructor) {
+  TripletSparseMatrix orig(2, 5, 4);
+  orig.mutable_rows()[0] = 0;
+  orig.mutable_cols()[0] = 1;
+  orig.mutable_values()[0] = 2.5;
+
+  orig.mutable_rows()[1] = 1;
+  orig.mutable_cols()[1] = 4;
+  orig.mutable_values()[1] = 5.2;
+  orig.set_num_nonzeros(2);
+
+  TripletSparseMatrix cpy(orig);
+
+  EXPECT_EQ(cpy.num_rows(), 2);
+  EXPECT_EQ(cpy.num_cols(), 5);
+  ASSERT_EQ(cpy.num_nonzeros(), 2);
+  EXPECT_EQ(cpy.max_num_nonzeros(), 4);
+
+  EXPECT_EQ(cpy.rows()[0], 0);
+  EXPECT_EQ(cpy.rows()[1], 1);
+
+  EXPECT_EQ(cpy.cols()[0], 1);
+  EXPECT_EQ(cpy.cols()[1], 4);
+
+  EXPECT_DOUBLE_EQ(cpy.values()[0], 2.5);
+  EXPECT_DOUBLE_EQ(cpy.values()[1], 5.2);
+}
+
+TEST(TripletSparseMatrix, AssignmentOperator) {
+  TripletSparseMatrix orig(2, 5, 4);
+  orig.mutable_rows()[0] = 0;
+  orig.mutable_cols()[0] = 1;
+  orig.mutable_values()[0] = 2.5;
+
+  orig.mutable_rows()[1] = 1;
+  orig.mutable_cols()[1] = 4;
+  orig.mutable_values()[1] = 5.2;
+  orig.set_num_nonzeros(2);
+
+  TripletSparseMatrix cpy(3, 50, 40);
+  cpy.mutable_rows()[0] = 0;
+  cpy.mutable_cols()[0] = 10;
+  cpy.mutable_values()[0] = 10.22;
+
+  cpy.mutable_rows()[1] = 2;
+  cpy.mutable_cols()[1] = 23;
+  cpy.mutable_values()[1] = 34.45;
+
+  cpy.mutable_rows()[0] = 0;
+  cpy.mutable_cols()[0] = 10;
+  cpy.mutable_values()[0] = 10.22;
+
+  cpy.mutable_rows()[1] = 0;
+  cpy.mutable_cols()[1] = 3;
+  cpy.mutable_values()[1] = 4.4;
+  cpy.set_num_nonzeros(3);
+
+  cpy = orig;
+
+  EXPECT_EQ(cpy.num_rows(), 2);
+  EXPECT_EQ(cpy.num_cols(), 5);
+  ASSERT_EQ(cpy.num_nonzeros(), 2);
+  EXPECT_EQ(cpy.max_num_nonzeros(), 4);
+
+  EXPECT_EQ(cpy.rows()[0], 0);
+  EXPECT_EQ(cpy.rows()[1], 1);
+
+  EXPECT_EQ(cpy.cols()[0], 1);
+  EXPECT_EQ(cpy.cols()[1], 4);
+
+  EXPECT_DOUBLE_EQ(cpy.values()[0], 2.5);
+  EXPECT_DOUBLE_EQ(cpy.values()[1], 5.2);
+}
+
+TEST(TripletSparseMatrix, AppendRows) {
+  // Build one matrix.
+  TripletSparseMatrix m(2, 5, 4);
+  m.mutable_rows()[0] = 0;
+  m.mutable_cols()[0] = 1;
+  m.mutable_values()[0] = 2.5;
+
+  m.mutable_rows()[1] = 1;
+  m.mutable_cols()[1] = 4;
+  m.mutable_values()[1] = 5.2;
+  m.set_num_nonzeros(2);
+
+  // Build another matrix.
+  TripletSparseMatrix a(10, 5, 4);
+  a.mutable_rows()[0] = 0;
+  a.mutable_cols()[0] = 1;
+  a.mutable_values()[0] = 3.5;
+
+  a.mutable_rows()[1] = 1;
+  a.mutable_cols()[1] = 4;
+  a.mutable_values()[1] = 6.2;
+
+  a.mutable_rows()[2] = 9;
+  a.mutable_cols()[2] = 5;
+  a.mutable_values()[2] = 1;
+  a.set_num_nonzeros(3);
+
+  // Glue the second matrix to the bottom of the first.
+  m.AppendRows(a);
+
+  EXPECT_EQ(m.num_rows(), 12);
+  EXPECT_EQ(m.num_cols(), 5);
+  ASSERT_EQ(m.num_nonzeros(), 5);
+
+  EXPECT_EQ(m.values()[0], 2.5);
+  EXPECT_EQ(m.values()[1], 5.2);
+  EXPECT_EQ(m.values()[2], 3.5);
+  EXPECT_EQ(m.values()[3], 6.2);
+  EXPECT_EQ(m.values()[4], 1);
+
+  EXPECT_EQ(m.rows()[0], 0);
+  EXPECT_EQ(m.rows()[1], 1);
+  EXPECT_EQ(m.rows()[2], 2);
+  EXPECT_EQ(m.rows()[3], 3);
+  EXPECT_EQ(m.rows()[4], 11);
+
+  EXPECT_EQ(m.cols()[0], 1);
+  EXPECT_EQ(m.cols()[1], 4);
+  EXPECT_EQ(m.cols()[2], 1);
+  EXPECT_EQ(m.cols()[3], 4);
+  EXPECT_EQ(m.cols()[4], 5);
+}
+
+TEST(TripletSparseMatrix, AppendCols) {
+  // Build one matrix.
+  TripletSparseMatrix m(2, 5, 4);
+  m.mutable_rows()[0] = 0;
+  m.mutable_cols()[0] = 1;
+  m.mutable_values()[0] = 2.5;
+
+  m.mutable_rows()[1] = 1;
+  m.mutable_cols()[1] = 4;
+  m.mutable_values()[1] = 5.2;
+  m.set_num_nonzeros(2);
+
+  // Build another matrix.
+  TripletSparseMatrix a(2, 15, 4);
+  a.mutable_rows()[0] = 0;
+  a.mutable_cols()[0] = 1;
+  a.mutable_values()[0] = 3.5;
+
+  a.mutable_rows()[1] = 1;
+  a.mutable_cols()[1] = 4;
+  a.mutable_values()[1] = 6.2;
+
+  a.mutable_rows()[2] = 0;
+  a.mutable_cols()[2] = 10;
+  a.mutable_values()[2] = 1;
+  a.set_num_nonzeros(3);
+
+  // Glue the second matrix to the right of the first.
+  m.AppendCols(a);
+
+  EXPECT_EQ(m.num_rows(), 2);
+  EXPECT_EQ(m.num_cols(), 20);
+  ASSERT_EQ(m.num_nonzeros(), 5);
+
+  EXPECT_EQ(m.values()[0], 2.5);
+  EXPECT_EQ(m.values()[1], 5.2);
+  EXPECT_EQ(m.values()[2], 3.5);
+  EXPECT_EQ(m.values()[3], 6.2);
+  EXPECT_EQ(m.values()[4], 1);
+
+  EXPECT_EQ(m.rows()[0], 0);
+  EXPECT_EQ(m.rows()[1], 1);
+  EXPECT_EQ(m.rows()[2], 0);
+  EXPECT_EQ(m.rows()[3], 1);
+  EXPECT_EQ(m.rows()[4], 0);
+
+  EXPECT_EQ(m.cols()[0], 1);
+  EXPECT_EQ(m.cols()[1], 4);
+  EXPECT_EQ(m.cols()[2], 6);
+  EXPECT_EQ(m.cols()[3], 9);
+  EXPECT_EQ(m.cols()[4], 15);
+}
+
+TEST(TripletSparseMatrix, CreateDiagonalMatrix) {
+  std::unique_ptr<double[]> values(new double[10]);
+  for (int i = 0; i < 10; ++i)
+    values[i] = i;
+
+  std::unique_ptr<TripletSparseMatrix> m(
+      TripletSparseMatrix::CreateSparseDiagonalMatrix(values.get(), 10));
+  EXPECT_EQ(m->num_rows(), 10);
+  EXPECT_EQ(m->num_cols(), 10);
+  ASSERT_EQ(m->num_nonzeros(), 10);
+  for (int i = 0; i < 10 ; ++i) {
+    EXPECT_EQ(m->rows()[i], i);
+    EXPECT_EQ(m->cols()[i], i);
+    EXPECT_EQ(m->values()[i], i);
+  }
+}
+
+TEST(TripletSparseMatrix, Resize) {
+  TripletSparseMatrix m(10, 20, 200);
+  int nnz = 0;
+  for (int i = 0; i < 10; ++i) {
+    for (int j = 0; j < 20; ++j) {
+      m.mutable_rows()[nnz] = i;
+      m.mutable_cols()[nnz] = j;
+      m.mutable_values()[nnz++] = i+j;
+    }
+  }
+  m.set_num_nonzeros(nnz);
+  m.Resize(5, 6);
+  EXPECT_EQ(m.num_rows(), 5);
+  EXPECT_EQ(m.num_cols(), 6);
+  ASSERT_EQ(m.num_nonzeros(), 30);
+  for (int i = 0; i < 30; ++i) {
+    EXPECT_EQ(m.values()[i], m.rows()[i] + m.cols()[i]);
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/trust_region_minimizer.cc b/internal/ceres/trust_region_minimizer.cc
new file mode 100644
index 0000000..5505cbb
--- /dev/null
+++ b/internal/ceres/trust_region_minimizer.cc
@@ -0,0 +1,799 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2016 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/trust_region_minimizer.h"
+
+#include <algorithm>
+#include <cmath>
+#include <cstdlib>
+#include <cstring>
+#include <memory>
+#include <limits>
+#include <string>
+#include <vector>
+
+#include "Eigen/Core"
+#include "ceres/array_utils.h"
+#include "ceres/coordinate_descent_minimizer.h"
+#include "ceres/evaluator.h"
+#include "ceres/file.h"
+#include "ceres/line_search.h"
+#include "ceres/stringprintf.h"
+#include "ceres/types.h"
+#include "ceres/wall_time.h"
+#include "glog/logging.h"
+
+// Helper macro to simplify some of the control flow.
+#define RETURN_IF_ERROR_AND_LOG(expr)                            \
+  do {                                                           \
+    if (!(expr)) {                                               \
+      LOG(ERROR) << "Terminating: " << solver_summary_->message; \
+      return;                                                    \
+    }                                                            \
+  } while (0)
+
+namespace ceres {
+namespace internal {
+
+TrustRegionMinimizer::~TrustRegionMinimizer() {}
+
+void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
+                                    double* parameters,
+                                    Solver::Summary* solver_summary) {
+  start_time_in_secs_ = WallTimeInSeconds();
+  iteration_start_time_in_secs_ = start_time_in_secs_;
+  Init(options, parameters, solver_summary);
+  RETURN_IF_ERROR_AND_LOG(IterationZero());
+
+  // Create the TrustRegionStepEvaluator. The construction needs to be
+  // delayed to this point because we need the cost for the starting
+  // point to initialize the step evaluator.
+  step_evaluator_.reset(new TrustRegionStepEvaluator(
+      x_cost_,
+      options_.use_nonmonotonic_steps
+          ? options_.max_consecutive_nonmonotonic_steps
+          : 0));
+
+  while (FinalizeIterationAndCheckIfMinimizerCanContinue()) {
+    iteration_start_time_in_secs_ = WallTimeInSeconds();
+    iteration_summary_ = IterationSummary();
+    iteration_summary_.iteration =
+        solver_summary->iterations.back().iteration + 1;
+
+    RETURN_IF_ERROR_AND_LOG(ComputeTrustRegionStep());
+    if (!iteration_summary_.step_is_valid) {
+      RETURN_IF_ERROR_AND_LOG(HandleInvalidStep());
+      continue;
+    }
+
+    if (options_.is_constrained) {
+      // Use a projected line search to enforce the bounds constraints
+      // and improve the quality of the step.
+      DoLineSearch(x_, gradient_, x_cost_, &delta_);
+    }
+
+    ComputeCandidatePointAndEvaluateCost();
+    DoInnerIterationsIfNeeded();
+
+    if (ParameterToleranceReached()) {
+      return;
+    }
+
+    if (FunctionToleranceReached()) {
+      return;
+    }
+
+    if (IsStepSuccessful()) {
+      RETURN_IF_ERROR_AND_LOG(HandleSuccessfulStep());
+      continue;
+    }
+
+    HandleUnsuccessfulStep();
+  }
+}
+
+// Initialize the minimizer, allocate working space and set some of
+// the fields in the solver_summary.
+void TrustRegionMinimizer::Init(const Minimizer::Options& options,
+                                double* parameters,
+                                Solver::Summary* solver_summary) {
+  options_ = options;
+  sort(options_.trust_region_minimizer_iterations_to_dump.begin(),
+       options_.trust_region_minimizer_iterations_to_dump.end());
+
+  parameters_ = parameters;
+
+  solver_summary_ = solver_summary;
+  solver_summary_->termination_type = NO_CONVERGENCE;
+  solver_summary_->num_successful_steps = 0;
+  solver_summary_->num_unsuccessful_steps = 0;
+  solver_summary_->is_constrained = options.is_constrained;
+
+  CHECK(options_.evaluator != nullptr);
+  CHECK(options_.jacobian != nullptr);
+  CHECK(options_.trust_region_strategy != nullptr);
+  evaluator_ = options_.evaluator.get();
+  jacobian_ = options_.jacobian.get();
+  strategy_ = options_.trust_region_strategy.get();
+
+  is_not_silent_ = !options.is_silent;
+  inner_iterations_are_enabled_ =
+      options.inner_iteration_minimizer.get() != NULL;
+  inner_iterations_were_useful_ = false;
+
+  num_parameters_ = evaluator_->NumParameters();
+  num_effective_parameters_ = evaluator_->NumEffectiveParameters();
+  num_residuals_ = evaluator_->NumResiduals();
+  num_consecutive_invalid_steps_ = 0;
+
+  x_ = ConstVectorRef(parameters_, num_parameters_);
+  x_norm_ = x_.norm();
+  residuals_.resize(num_residuals_);
+  trust_region_step_.resize(num_effective_parameters_);
+  delta_.resize(num_effective_parameters_);
+  candidate_x_.resize(num_parameters_);
+  gradient_.resize(num_effective_parameters_);
+  model_residuals_.resize(num_residuals_);
+  negative_gradient_.resize(num_effective_parameters_);
+  projected_gradient_step_.resize(num_parameters_);
+
+  // By default the scaling is one. If the user requests Jacobi scaling
+  // of the Jacobian, we will compute and overwrite this vector.
+  jacobian_scaling_ = Vector::Ones(num_effective_parameters_);
+
+  x_norm_ = -1;  // Invalid value
+  x_cost_ = std::numeric_limits<double>::max();
+  minimum_cost_ = x_cost_;
+  model_cost_change_ = 0.0;
+}
+
+// 1. Project the initial solution onto the feasible set if needed.
+// 2. Compute the initial cost, jacobian & gradient.
+//
+// Return true if all computations can be performed successfully.
+bool TrustRegionMinimizer::IterationZero() {
+  iteration_summary_ = IterationSummary();
+  iteration_summary_.iteration = 0;
+  iteration_summary_.step_is_valid = false;
+  iteration_summary_.step_is_successful = false;
+  iteration_summary_.cost_change = 0.0;
+  iteration_summary_.gradient_max_norm = 0.0;
+  iteration_summary_.gradient_norm = 0.0;
+  iteration_summary_.step_norm = 0.0;
+  iteration_summary_.relative_decrease = 0.0;
+  iteration_summary_.eta = options_.eta;
+  iteration_summary_.linear_solver_iterations = 0;
+  iteration_summary_.step_solver_time_in_seconds = 0;
+
+  if (options_.is_constrained) {
+    delta_.setZero();
+    if (!evaluator_->Plus(x_.data(), delta_.data(), candidate_x_.data())) {
+      solver_summary_->message =
+          "Unable to project initial point onto the feasible set.";
+      solver_summary_->termination_type = FAILURE;
+      return false;
+    }
+
+    x_ = candidate_x_;
+    x_norm_ = x_.norm();
+  }
+
+  if (!EvaluateGradientAndJacobian(/*new_evaluation_point=*/true)) {
+    return false;
+  }
+
+  solver_summary_->initial_cost = x_cost_ + solver_summary_->fixed_cost;
+  iteration_summary_.step_is_valid = true;
+  iteration_summary_.step_is_successful = true;
+  return true;
+}
+
+// For the current x_, compute
+//
+//  1. Cost
+//  2. Jacobian
+//  3. Gradient
+//  4. Scale the Jacobian if needed (and compute the scaling if we are
+//     in iteration zero).
+//  5. Compute the 2 and max norm of the gradient.
+//
+// Returns true if all computations could be performed
+// successfully. Any failures are considered fatal and the
+// Solver::Summary is updated to indicate this.
+bool TrustRegionMinimizer::EvaluateGradientAndJacobian(
+    bool new_evaluation_point) {
+  Evaluator::EvaluateOptions evaluate_options;
+  evaluate_options.new_evaluation_point = new_evaluation_point;
+  if (!evaluator_->Evaluate(evaluate_options,
+                            x_.data(),
+                            &x_cost_,
+                            residuals_.data(),
+                            gradient_.data(),
+                            jacobian_)) {
+    solver_summary_->message = "Residual and Jacobian evaluation failed.";
+    solver_summary_->termination_type = FAILURE;
+    return false;
+  }
+
+  iteration_summary_.cost = x_cost_ + solver_summary_->fixed_cost;
+
+  if (options_.jacobi_scaling) {
+    if (iteration_summary_.iteration == 0) {
+      // Compute a scaling vector that is used to improve the
+      // conditioning of the Jacobian.
+      //
+      // jacobian_scaling_[i] = 1 / (1 + sqrt((J'J)_ii)),
+      // i.e. approximately diag(J'J)^{-1/2}.
+      jacobian_->SquaredColumnNorm(jacobian_scaling_.data());
+      for (int i = 0; i < jacobian_->num_cols(); ++i) {
+        // Add one to the denominator to prevent division by zero.
+        jacobian_scaling_[i] = 1.0 / (1.0 + sqrt(jacobian_scaling_[i]));
+      }
+    }
+
+    // Scale the columns: jacobian = jacobian * diag(jacobian_scaling_).
+    jacobian_->ScaleColumns(jacobian_scaling_.data());
+  }
+
+  // The gradient exists in the local tangent space. To account for
+  // the bounds constraints correctly, instead of just computing the
+  // norm of the gradient vector, we compute
+  //
+  // |Plus(x, -gradient) - x|
+  //
+  // Where the Plus operator lifts the negative gradient to the
+  // ambient space, adds it to x and projects it on the hypercube
+  // defined by the bounds.
+  negative_gradient_ = -gradient_;
+  if (!evaluator_->Plus(x_.data(),
+                        negative_gradient_.data(),
+                        projected_gradient_step_.data())) {
+    solver_summary_->message =
+        "projected_gradient_step = Plus(x, -gradient) failed.";
+    solver_summary_->termination_type = FAILURE;
+    return false;
+  }
+
+  iteration_summary_.gradient_max_norm =
+      (x_ - projected_gradient_step_).lpNorm<Eigen::Infinity>();
+  iteration_summary_.gradient_norm = (x_ - projected_gradient_step_).norm();
+  return true;
+}
+
+// 1. Add the final timing information to the iteration summary.
+// 2. Run the callbacks
+// 3. Check for termination based on
+//    a. Run time
+//    b. Iteration count
+//    c. Max norm of the gradient
+//    d. Size of the trust region radius.
+//
+// Returns true if the user did not terminate the solver and none of
+// these termination criteria are met.
+bool TrustRegionMinimizer::FinalizeIterationAndCheckIfMinimizerCanContinue() {
+  if (iteration_summary_.step_is_successful) {
+    ++solver_summary_->num_successful_steps;
+    if (x_cost_ < minimum_cost_) {
+      minimum_cost_ = x_cost_;
+      VectorRef(parameters_, num_parameters_) = x_;
+      iteration_summary_.step_is_nonmonotonic = false;
+    } else {
+      iteration_summary_.step_is_nonmonotonic = true;
+    }
+  } else {
+    ++solver_summary_->num_unsuccessful_steps;
+  }
+
+  iteration_summary_.trust_region_radius = strategy_->Radius();
+  iteration_summary_.iteration_time_in_seconds =
+      WallTimeInSeconds() - iteration_start_time_in_secs_;
+  iteration_summary_.cumulative_time_in_seconds =
+      WallTimeInSeconds() - start_time_in_secs_ +
+      solver_summary_->preprocessor_time_in_seconds;
+
+  solver_summary_->iterations.push_back(iteration_summary_);
+
+  if (!RunCallbacks(options_, iteration_summary_, solver_summary_)) {
+    return false;
+  }
+
+  if (MaxSolverTimeReached()) {
+    return false;
+  }
+
+  if (MaxSolverIterationsReached()) {
+    return false;
+  }
+
+  if (GradientToleranceReached()) {
+    return false;
+  }
+
+  if (MinTrustRegionRadiusReached()) {
+    return false;
+  }
+
+  return true;
+}
+
+// Compute the trust region step using the TrustRegionStrategy chosen
+// by the user.
+//
+// If the strategy returns with LINEAR_SOLVER_FATAL_ERROR, which
+// indicates an unrecoverable error, return false. This is the only
+// condition that returns false.
+//
+// If the strategy returns with LINEAR_SOLVER_FAILURE, which indicates
+// a numerical failure that could be recovered from by retrying
+// (e.g. by increasing the strength of the regularization), we set
+// iteration_summary_.step_is_valid to false and return true.
+//
+// In all other cases, we compute the decrease in the trust region
+// model problem. In exact arithmetic, this should always be
+// positive, but due to numerical problems in the TrustRegionStrategy
+// or round off error when computing the decrease, it may be negative,
+// in which case we again set iteration_summary_.step_is_valid to
+// false.
+bool TrustRegionMinimizer::ComputeTrustRegionStep() {
+  const double strategy_start_time = WallTimeInSeconds();
+  iteration_summary_.step_is_valid = false;
+  TrustRegionStrategy::PerSolveOptions per_solve_options;
+  per_solve_options.eta = options_.eta;
+  if (find(options_.trust_region_minimizer_iterations_to_dump.begin(),
+           options_.trust_region_minimizer_iterations_to_dump.end(),
+           iteration_summary_.iteration) !=
+      options_.trust_region_minimizer_iterations_to_dump.end()) {
+    per_solve_options.dump_format_type =
+        options_.trust_region_problem_dump_format_type;
+    per_solve_options.dump_filename_base =
+        JoinPath(options_.trust_region_problem_dump_directory,
+                 StringPrintf("ceres_solver_iteration_%03d",
+                              iteration_summary_.iteration));
+  }
+
+  TrustRegionStrategy::Summary strategy_summary =
+      strategy_->ComputeStep(per_solve_options,
+                             jacobian_,
+                             residuals_.data(),
+                             trust_region_step_.data());
+
+  if (strategy_summary.termination_type == LINEAR_SOLVER_FATAL_ERROR) {
+    solver_summary_->message =
+        "Linear solver failed due to unrecoverable "
+        "non-numeric causes. Please see the error log for clues. ";
+    solver_summary_->termination_type = FAILURE;
+    return false;
+  }
+
+  iteration_summary_.step_solver_time_in_seconds =
+      WallTimeInSeconds() - strategy_start_time;
+  iteration_summary_.linear_solver_iterations = strategy_summary.num_iterations;
+
+  if (strategy_summary.termination_type == LINEAR_SOLVER_FAILURE) {
+    return true;
+  }
+
+  // new_model_cost
+  //  = 1/2 [f + J * step]^2
+  //  = 1/2 [ f'f + 2f'J * step + step' * J' * J * step ]
+  // model_cost_change
+  //  = cost - new_model_cost
+  //  = f'f/2  - 1/2 [ f'f + 2f'J * step + step' * J' * J * step]
+  //  = -f'J * step - step' * J' * J * step / 2
+  //  = -(J * step)'(f + J * step / 2)
+  model_residuals_.setZero();
+  jacobian_->RightMultiply(trust_region_step_.data(), model_residuals_.data());
+  model_cost_change_ =
+      -model_residuals_.dot(residuals_ + model_residuals_ / 2.0);
+
+  // TODO(sameeragarwal)
+  //
+  //  1. What happens if model_cost_change_ = 0
+  //  2. What happens if -epsilon <= model_cost_change_ < 0 for some
+  //     small epsilon due to round off error.
+  iteration_summary_.step_is_valid = (model_cost_change_ > 0.0);
+  if (iteration_summary_.step_is_valid) {
+    // Undo the Jacobian column scaling.
+    delta_ = (trust_region_step_.array() * jacobian_scaling_.array()).matrix();
+    num_consecutive_invalid_steps_ = 0;
+  }
+
+  VLOG_IF(1, is_not_silent_ && !iteration_summary_.step_is_valid)
+      << "Invalid step: current_cost: " << x_cost_
+      << " absolute model cost change: " << model_cost_change_
+      << " relative model cost change: " << (model_cost_change_ / x_cost_);
+  return true;
+}
+
+// Invalid steps can happen for a number of reasons. We allow a
+// limited number of consecutive failures and return false if this
+// limit is exceeded.
+bool TrustRegionMinimizer::HandleInvalidStep() {
+  // TODO(sameeragarwal): Should we be returning FAILURE or
+  // NO_CONVERGENCE? The solution value is still usable in many cases,
+  // it is not clear if we should declare the solver a failure
+  // entirely. For example the case where model_cost_change ~ 0.0, but
+  // just slightly negative.
+  if (++num_consecutive_invalid_steps_ >=
+      options_.max_num_consecutive_invalid_steps) {
+    solver_summary_->message = StringPrintf(
+        "Number of consecutive invalid steps more "
+        "than Solver::Options::max_num_consecutive_invalid_steps: %d",
+        options_.max_num_consecutive_invalid_steps);
+    solver_summary_->termination_type = FAILURE;
+    return false;
+  }
+
+  strategy_->StepIsInvalid();
+
+  // We are going to try and reduce the trust region radius and
+  // solve again. To do this, we are going to treat this iteration
+  // as an unsuccessful iteration. Since the various callbacks are
+  // still executed, we are going to fill the iteration summary
+  // with data that assumes a step of length zero and no progress.
+  iteration_summary_.cost = x_cost_ + solver_summary_->fixed_cost;
+  iteration_summary_.cost_change = 0.0;
+  iteration_summary_.gradient_max_norm =
+      solver_summary_->iterations.back().gradient_max_norm;
+  iteration_summary_.gradient_norm =
+      solver_summary_->iterations.back().gradient_norm;
+  iteration_summary_.step_norm = 0.0;
+  iteration_summary_.relative_decrease = 0.0;
+  iteration_summary_.eta = options_.eta;
+  return true;
+}
+
+// Use the supplied coordinate descent minimizer to perform inner
+// iterations and compute the improvement due to them. The cost after
+// performing the inner iterations is stored in candidate_cost_.
+//
+// The optimization is performed with candidate_x_ as the starting
+// point, and if the optimization is successful, candidate_x_ will be
+// updated with the optimized parameters.
+void TrustRegionMinimizer::DoInnerIterationsIfNeeded() {
+  inner_iterations_were_useful_ = false;
+  if (!inner_iterations_are_enabled_ ||
+      candidate_cost_ >= std::numeric_limits<double>::max()) {
+    return;
+  }
+
+  double inner_iteration_start_time = WallTimeInSeconds();
+  ++solver_summary_->num_inner_iteration_steps;
+  inner_iteration_x_ = candidate_x_;
+  Solver::Summary inner_iteration_summary;
+  options_.inner_iteration_minimizer->Minimize(
+      options_, inner_iteration_x_.data(), &inner_iteration_summary);
+  double inner_iteration_cost;
+  if (!evaluator_->Evaluate(
+          inner_iteration_x_.data(), &inner_iteration_cost, NULL, NULL, NULL)) {
+    VLOG_IF(2, is_not_silent_) << "Inner iteration failed.";
+    return;
+  }
+
+  VLOG_IF(2, is_not_silent_)
+      << "Inner iteration succeeded; Current cost: " << x_cost_
+      << " Trust region step cost: " << candidate_cost_
+      << " Inner iteration cost: " << inner_iteration_cost;
+
+  candidate_x_ = inner_iteration_x_;
+
+  // Normally, the quality of a trust region step is measured by
+  // the ratio
+  //
+  //              cost_change
+  //    r =    -----------------
+  //           model_cost_change
+  //
+  // All the change in the nonlinear objective is due to the trust
+  // region step so this ratio is a good measure of the quality of
+  // the trust region radius. However, when inner iterations are
+  // being used, cost_change includes the contribution of the
+  // inner iterations and it is not fair to credit it all to the
+  // trust region algorithm. So we change the ratio to be
+  //
+  //                              cost_change
+  //    r =    ------------------------------------------------
+  //           (model_cost_change + inner_iteration_cost_change)
+  //
+  // Practically we do this by increasing model_cost_change by
+  // inner_iteration_cost_change.
+
+  const double inner_iteration_cost_change =
+      candidate_cost_ - inner_iteration_cost;
+  model_cost_change_ += inner_iteration_cost_change;
+  inner_iterations_were_useful_ = inner_iteration_cost < x_cost_;
+  const double inner_iteration_relative_progress =
+      1.0 - inner_iteration_cost / candidate_cost_;
+
+  // Disable inner iterations once the relative improvement
+  // drops below tolerance.
+  inner_iterations_are_enabled_ =
+      (inner_iteration_relative_progress > options_.inner_iteration_tolerance);
+  VLOG_IF(2, is_not_silent_ && !inner_iterations_are_enabled_)
+      << "Disabling inner iterations. Progress : "
+      << inner_iteration_relative_progress;
+  candidate_cost_ = inner_iteration_cost;
+
+  solver_summary_->inner_iteration_time_in_seconds +=
+      WallTimeInSeconds() - inner_iteration_start_time;
+}
+
+// Perform a projected line search to improve the objective function
+// value along delta.
+//
+// TODO(sameeragarwal): The current implementation does not do
+// anything illegal but is incorrect and not terribly effective.
+//
+// https://github.com/ceres-solver/ceres-solver/issues/187
+void TrustRegionMinimizer::DoLineSearch(const Vector& x,
+                                        const Vector& gradient,
+                                        const double cost,
+                                        Vector* delta) {
+  LineSearchFunction line_search_function(evaluator_);
+
+  LineSearch::Options line_search_options;
+  line_search_options.is_silent = true;
+  line_search_options.interpolation_type =
+      options_.line_search_interpolation_type;
+  line_search_options.min_step_size = options_.min_line_search_step_size;
+  line_search_options.sufficient_decrease =
+      options_.line_search_sufficient_function_decrease;
+  line_search_options.max_step_contraction =
+      options_.max_line_search_step_contraction;
+  line_search_options.min_step_contraction =
+      options_.min_line_search_step_contraction;
+  line_search_options.max_num_iterations =
+      options_.max_num_line_search_step_size_iterations;
+  line_search_options.sufficient_curvature_decrease =
+      options_.line_search_sufficient_curvature_decrease;
+  line_search_options.max_step_expansion =
+      options_.max_line_search_step_expansion;
+  line_search_options.function = &line_search_function;
+
+  std::string message;
+  std::unique_ptr<LineSearch> line_search(
+      LineSearch::Create(ceres::ARMIJO, line_search_options, &message));
+  LineSearch::Summary line_search_summary;
+  line_search_function.Init(x, *delta);
+  line_search->Search(1.0, cost, gradient.dot(*delta), &line_search_summary);
+
+  solver_summary_->num_line_search_steps += line_search_summary.num_iterations;
+  solver_summary_->line_search_cost_evaluation_time_in_seconds +=
+      line_search_summary.cost_evaluation_time_in_seconds;
+  solver_summary_->line_search_gradient_evaluation_time_in_seconds +=
+      line_search_summary.gradient_evaluation_time_in_seconds;
+  solver_summary_->line_search_polynomial_minimization_time_in_seconds +=
+      line_search_summary.polynomial_minimization_time_in_seconds;
+  solver_summary_->line_search_total_time_in_seconds +=
+      line_search_summary.total_time_in_seconds;
+
+  if (line_search_summary.success) {
+    *delta *= line_search_summary.optimal_point.x;
+  }
+}
+
+// Check if the maximum amount of time allowed by the user for the
+// solver has been exceeded, and if so return true after updating
+// Solver::Summary::message.
+bool TrustRegionMinimizer::MaxSolverTimeReached() {
+  const double total_solver_time =
+      WallTimeInSeconds() - start_time_in_secs_ +
+      solver_summary_->preprocessor_time_in_seconds;
+  if (total_solver_time < options_.max_solver_time_in_seconds) {
+    return false;
+  }
+
+  solver_summary_->message = StringPrintf("Maximum solver time reached. "
+                                          "Total solver time: %e >= %e.",
+                                          total_solver_time,
+                                          options_.max_solver_time_in_seconds);
+  solver_summary_->termination_type = NO_CONVERGENCE;
+  VLOG_IF(1, is_not_silent_) << "Terminating: " << solver_summary_->message;
+  return true;
+}
+
+// Check if the maximum number of iterations allowed by the user for
+// the solver has been exceeded, and if so return true after updating
+// Solver::Summary::message.
+bool TrustRegionMinimizer::MaxSolverIterationsReached() {
+  if (iteration_summary_.iteration < options_.max_num_iterations) {
+    return false;
+  }
+
+  solver_summary_->message =
+      StringPrintf("Maximum number of iterations reached. "
+                   "Number of iterations: %d.",
+                   iteration_summary_.iteration);
+
+  solver_summary_->termination_type = NO_CONVERGENCE;
+  VLOG_IF(1, is_not_silent_) << "Terminating: " << solver_summary_->message;
+  return true;
+}
+
+// Check convergence based on the max norm of the gradient (only for
+// iterations where the step was declared successful).
+bool TrustRegionMinimizer::GradientToleranceReached() {
+  if (!iteration_summary_.step_is_successful ||
+      iteration_summary_.gradient_max_norm > options_.gradient_tolerance) {
+    return false;
+  }
+
+  solver_summary_->message = StringPrintf(
+      "Gradient tolerance reached. "
+      "Gradient max norm: %e <= %e",
+      iteration_summary_.gradient_max_norm,
+      options_.gradient_tolerance);
+  solver_summary_->termination_type = CONVERGENCE;
+  VLOG_IF(1, is_not_silent_) << "Terminating: " << solver_summary_->message;
+  return true;
+}
+
+// Check convergence based on the size of the trust region radius.
+bool TrustRegionMinimizer::MinTrustRegionRadiusReached() {
+  if (iteration_summary_.trust_region_radius >
+      options_.min_trust_region_radius) {
+    return false;
+  }
+
+  solver_summary_->message =
+      StringPrintf("Minimum trust region radius reached. "
+                   "Trust region radius: %e <= %e",
+                   iteration_summary_.trust_region_radius,
+                   options_.min_trust_region_radius);
+  solver_summary_->termination_type = CONVERGENCE;
+  VLOG_IF(1, is_not_silent_) << "Terminating: " << solver_summary_->message;
+  return true;
+}
+
+// Solver::Options::parameter_tolerance based convergence check.
+bool TrustRegionMinimizer::ParameterToleranceReached() {
+  // Compute the norm of the step in the ambient space.
+  iteration_summary_.step_norm = (x_ - candidate_x_).norm();
+  const double step_size_tolerance =
+      options_.parameter_tolerance * (x_norm_ + options_.parameter_tolerance);
+
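+  // The step is considered too small to make further progress when
+  //
+  //   |step| <= parameter_tolerance * (|x| + parameter_tolerance),
+  //
+  // where the additive parameter_tolerance term guards against x being at
+  // or near the origin.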
+  if (iteration_summary_.step_norm > step_size_tolerance) {
+    return false;
+  }
+
+  solver_summary_->message = StringPrintf(
+      "Parameter tolerance reached. "
+      "Relative step_norm: %e <= %e.",
+      (iteration_summary_.step_norm / (x_norm_ + options_.parameter_tolerance)),
+      options_.parameter_tolerance);
+  solver_summary_->termination_type = CONVERGENCE;
+  VLOG_IF(1, is_not_silent_) << "Terminating: " << solver_summary_->message;
+  return true;
+}
+
+// Solver::Options::function_tolerance based convergence check.
+bool TrustRegionMinimizer::FunctionToleranceReached() {
+  iteration_summary_.cost_change = x_cost_ - candidate_cost_;
+  const double absolute_function_tolerance =
+      options_.function_tolerance * x_cost_;
+
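+  // Convergence is declared when
+  //
+  //   |cost_change| <= function_tolerance * cost,
+  //
+  // i.e., when the relative change in the objective value drops below
+  // function_tolerance.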
+  if (fabs(iteration_summary_.cost_change) > absolute_function_tolerance) {
+    return false;
+  }
+
+  solver_summary_->message = StringPrintf(
+      "Function tolerance reached. "
+      "|cost_change|/cost: %e <= %e",
+      fabs(iteration_summary_.cost_change) / x_cost_,
+      options_.function_tolerance);
+  solver_summary_->termination_type = CONVERGENCE;
+  VLOG_IF(1, is_not_silent_) << "Terminating: " << solver_summary_->message;
+  return true;
+}
+
+// Compute candidate_x_ = Plus(x_, delta_)
+// Evaluate the cost of candidate_x_ as candidate_cost_.
+//
+// Failure to compute the step or the cost means that candidate_cost_
+// is set to std::numeric_limits<double>::max(). Unlike
+// EvaluateGradientAndJacobian, failure in this function is not fatal
+// as we are only computing and evaluating a candidate point, and if
+// for some reason we are unable to evaluate it, we consider it to be
+// a point with very high cost. This allows the user to deal with edge
+// cases/constraints as part of the LocalParameterization and
+// CostFunction objects.
+void TrustRegionMinimizer::ComputeCandidatePointAndEvaluateCost() {
+  if (!evaluator_->Plus(x_.data(), delta_.data(), candidate_x_.data())) {
+    LOG_IF(WARNING, is_not_silent_)
+        << "x_plus_delta = Plus(x, delta) failed. "
+        << "Treating it as a step with infinite cost";
+    candidate_cost_ = std::numeric_limits<double>::max();
+    return;
+  }
+
+  if (!evaluator_->Evaluate(
+          candidate_x_.data(), &candidate_cost_, NULL, NULL, NULL)) {
+    LOG_IF(WARNING, is_not_silent_)
+        << "Step failed to evaluate. "
+        << "Treating it as a step with infinite cost";
+    candidate_cost_ = std::numeric_limits<double>::max();
+  }
+}
+
+bool TrustRegionMinimizer::IsStepSuccessful() {
+  iteration_summary_.relative_decrease =
+      step_evaluator_->StepQuality(candidate_cost_, model_cost_change_);
+
+  // In most cases, boosting the model_cost_change by the
+  // improvement caused by the inner iterations is fine, but it can
+  // be the case that the original trust region step was so bad that
+  // the resulting improvement in the cost was negative, and the
+  // change caused by the inner iterations was large enough to
+  // improve the step, but also to make relative decrease quite
+  // small.
+  //
+  // This can cause the trust region loop to reject this step. To
+  // get around this, we explicitly check if the inner iterations
+  // led to a net decrease in the objective function value. If
+  // they did, we accept the step even if the trust region ratio
+  // is small.
+  //
+  // Notice that we do not just check that cost_change is positive
+  // which is a weaker condition and would render the
+  // min_relative_decrease threshold useless. Instead, we keep
+  // track of inner_iterations_were_useful, which is true only
+  // when inner iterations lead to a net decrease in the cost.
+  return (inner_iterations_were_useful_ ||
+          iteration_summary_.relative_decrease >
+              options_.min_relative_decrease);
+}
+
+// Declare the step successful, move to candidate_x, update the
+// derivatives and let the trust region strategy and the step
+// evaluator know that the step has been accepted.
+bool TrustRegionMinimizer::HandleSuccessfulStep() {
+  x_ = candidate_x_;
+  x_norm_ = x_.norm();
+
+  // Since the step was successful, this point has already had the residual
+  // evaluated (but not the jacobian). So indicate that to the evaluator.
+  if (!EvaluateGradientAndJacobian(/*new_evaluation_point=*/false)) {
+    return false;
+  }
+
+  iteration_summary_.step_is_successful = true;
+  strategy_->StepAccepted(iteration_summary_.relative_decrease);
+  step_evaluator_->StepAccepted(candidate_cost_, model_cost_change_);
+  return true;
+}
+
+// Declare the step unsuccessful and inform the trust region strategy.
+void TrustRegionMinimizer::HandleUnsuccessfulStep() {
+  iteration_summary_.step_is_successful = false;
+  strategy_->StepRejected(iteration_summary_.relative_decrease);
+  iteration_summary_.cost = candidate_cost_ + solver_summary_->fixed_cost;
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/trust_region_minimizer.h b/internal/ceres/trust_region_minimizer.h
new file mode 100644
index 0000000..8ddd77e
--- /dev/null
+++ b/internal/ceres/trust_region_minimizer.h
@@ -0,0 +1,166 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2016 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_TRUST_REGION_MINIMIZER_H_
+#define CERES_INTERNAL_TRUST_REGION_MINIMIZER_H_
+
+#include <memory>
+#include "ceres/internal/eigen.h"
+#include "ceres/minimizer.h"
+#include "ceres/solver.h"
+#include "ceres/sparse_matrix.h"
+#include "ceres/trust_region_step_evaluator.h"
+#include "ceres/trust_region_strategy.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+// Generic trust region minimization algorithm.
+//
+// For example usage, see SolverImpl::Minimize.
+class TrustRegionMinimizer : public Minimizer {
+ public:
+  ~TrustRegionMinimizer();
+
+  // This method is not thread safe.
+  virtual void Minimize(const Minimizer::Options& options,
+                        double* parameters,
+                        Solver::Summary* solver_summary);
+
+ private:
+  void Init(const Minimizer::Options& options,
+            double* parameters,
+            Solver::Summary* solver_summary);
+  bool IterationZero();
+  bool FinalizeIterationAndCheckIfMinimizerCanContinue();
+  bool ComputeTrustRegionStep();
+
+  bool EvaluateGradientAndJacobian(bool new_evaluation_point);
+  void ComputeCandidatePointAndEvaluateCost();
+
+  void DoLineSearch(const Vector& x,
+                    const Vector& gradient,
+                    const double cost,
+                    Vector* delta);
+  void DoInnerIterationsIfNeeded();
+
+  bool ParameterToleranceReached();
+  bool FunctionToleranceReached();
+  bool GradientToleranceReached();
+  bool MaxSolverTimeReached();
+  bool MaxSolverIterationsReached();
+  bool MinTrustRegionRadiusReached();
+
+  bool IsStepSuccessful();
+  void HandleUnsuccessfulStep();
+  bool HandleSuccessfulStep();
+  bool HandleInvalidStep();
+
+  Minimizer::Options options_;
+
+  // These pointers are shortcuts to objects passed to the
+  // TrustRegionMinimizer. The TrustRegionMinimizer does not own them.
+  double* parameters_;
+  Solver::Summary* solver_summary_;
+  Evaluator* evaluator_;
+  SparseMatrix* jacobian_;
+  TrustRegionStrategy* strategy_;
+
+  std::unique_ptr<TrustRegionStepEvaluator> step_evaluator_;
+
+  bool is_not_silent_;
+  bool inner_iterations_are_enabled_;
+  bool inner_iterations_were_useful_;
+
+  // Summary of the current iteration.
+  IterationSummary iteration_summary_;
+
+  // Dimensionality of the problem in the ambient space.
+  int num_parameters_;
+  // Dimensionality of the problem in the tangent space. This is the
+  // number of columns in the Jacobian.
+  int num_effective_parameters_;
+  // Length of the residual vector, also the number of rows in the Jacobian.
+  int num_residuals_;
+
+  // Current point.
+  Vector x_;
+  // Residuals at x_.
+  Vector residuals_;
+  // Gradient at x_.
+  Vector gradient_;
+  // Solution computed by the inner iterations.
+  Vector inner_iteration_x_;
+  // model_residuals = J * trust_region_step
+  Vector model_residuals_;
+  Vector negative_gradient_;
+  // projected_gradient_step = Plus(x, -gradient), an intermediate
+  // quantity used to compute the projected gradient norm.
+  Vector projected_gradient_step_;
+  // The step computed by the trust region strategy. If Jacobi scaling
+  // is enabled, this is a vector in the scaled space.
+  Vector trust_region_step_;
+  // The current proposal for how far the trust region algorithm
+  // thinks we should move. In the most basic case, it is just the
+  // trust_region_step_ with the Jacobi scaling undone. If bounds
+  // constraints are present, then it is the result of the projected
+  // line search.
+  Vector delta_;
+  // candidate_x  = Plus(x, delta)
+  Vector candidate_x_;
+  // Scaling vector to scale the columns of the Jacobian.
+  Vector jacobian_scaling_;
+
+  // Euclidean norm of x_.
+  double x_norm_;
+  // Cost at x_.
+  double x_cost_;
+  // Minimum cost encountered up till now.
+  double minimum_cost_;
+  // How much did the trust region strategy reduce the cost of the
+  // linearized Gauss-Newton model.
+  double model_cost_change_;
+  // Cost at candidate_x_.
+  double candidate_cost_;
+
+  // Time at which the minimizer was started.
+  double start_time_in_secs_;
+  // Time at which the current iteration was started.
+  double iteration_start_time_in_secs_;
+  // Number of consecutive steps where the minimizer loop computed a
+  // numerically invalid step.
+  int num_consecutive_invalid_steps_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_TRUST_REGION_MINIMIZER_H_
diff --git a/internal/ceres/trust_region_minimizer_test.cc b/internal/ceres/trust_region_minimizer_test.cc
new file mode 100644
index 0000000..0c4ea29
--- /dev/null
+++ b/internal/ceres/trust_region_minimizer_test.cc
@@ -0,0 +1,425 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//         sameeragarwal@google.com (Sameer Agarwal)
+//
+// This tests the TrustRegionMinimizer loop using a direct Evaluator
+// implementation, rather than having a test that goes through all the
+// Program and Problem machinery.
+
+#include <cmath>
+#include "ceres/autodiff_cost_function.h"
+#include "ceres/cost_function.h"
+#include "ceres/dense_qr_solver.h"
+#include "ceres/dense_sparse_matrix.h"
+#include "ceres/evaluator.h"
+#include "ceres/internal/port.h"
+#include "ceres/linear_solver.h"
+#include "ceres/minimizer.h"
+#include "ceres/problem.h"
+#include "ceres/trust_region_minimizer.h"
+#include "ceres/trust_region_strategy.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+// Templated Evaluator for Powell's function. The template parameters
+// indicate which of the four variables/columns of the jacobian are
+// active. This is equivalent to constructing a problem and using the
+// SubsetLocalParameterization. This allows us to test the support for
+// the Evaluator::Plus operation besides checking for the basic
+// performance of the trust region algorithm.
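+//
+// Powell's singular function is
+//
+//   f1 = x1 + 10 * x2
+//   f2 = sqrt(5) * (x3 - x4)
+//   f3 = (x2 - 2 * x3)^2
+//   f4 = sqrt(10) * (x1 - x4)^2
+//
+// with cost 1/2 (f1^2 + f2^2 + f3^2 + f4^2) and the minimum at
+// x1 = x2 = x3 = x4 = 0.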
+template <bool col1, bool col2, bool col3, bool col4>
+class PowellEvaluator2 : public Evaluator {
+ public:
+  PowellEvaluator2()
+      : num_active_cols_(
+          (col1 ? 1 : 0) +
+          (col2 ? 1 : 0) +
+          (col3 ? 1 : 0) +
+          (col4 ? 1 : 0)) {
+    VLOG(1) << "Columns: "
+            << col1 << " "
+            << col2 << " "
+            << col3 << " "
+            << col4;
+  }
+
+  virtual ~PowellEvaluator2() {}
+
+  // Implementation of Evaluator interface.
+  virtual SparseMatrix* CreateJacobian() const {
+    CHECK(col1 || col2 || col3 || col4);
+    DenseSparseMatrix* dense_jacobian =
+        new DenseSparseMatrix(NumResiduals(), NumEffectiveParameters());
+    dense_jacobian->SetZero();
+    return dense_jacobian;
+  }
+
+  virtual bool Evaluate(const Evaluator::EvaluateOptions& evaluate_options,
+                        const double* state,
+                        double* cost,
+                        double* residuals,
+                        double* gradient,
+                        SparseMatrix* jacobian) {
+    const double x1 = state[0];
+    const double x2 = state[1];
+    const double x3 = state[2];
+    const double x4 = state[3];
+
+    VLOG(1) << "State: "
+            << "x1=" << x1 << ", "
+            << "x2=" << x2 << ", "
+            << "x3=" << x3 << ", "
+            << "x4=" << x4 << ".";
+
+    const double f1 = x1 + 10.0 * x2;
+    const double f2 = sqrt(5.0) * (x3 - x4);
+    const double f3 = pow(x2 - 2.0 * x3, 2.0);
+    const double f4 = sqrt(10.0) * pow(x1 - x4, 2.0);
+
+    VLOG(1) << "Function: "
+            << "f1=" << f1 << ", "
+            << "f2=" << f2 << ", "
+            << "f3=" << f3 << ", "
+            << "f4=" << f4 << ".";
+
+    *cost = (f1*f1 + f2*f2 + f3*f3 + f4*f4) / 2.0;
+
+    VLOG(1) << "Cost: " << *cost;
+
+    if (residuals != NULL) {
+      residuals[0] = f1;
+      residuals[1] = f2;
+      residuals[2] = f3;
+      residuals[3] = f4;
+    }
+
+    if (jacobian != NULL) {
+      DenseSparseMatrix* dense_jacobian =
+          down_cast<DenseSparseMatrix*>(jacobian);
+      dense_jacobian->SetZero();
+
+      ColMajorMatrixRef jacobian_matrix = dense_jacobian->mutable_matrix();
+      CHECK_EQ(jacobian_matrix.cols(), num_active_cols_);
+
+      int column_index = 0;
+      if (col1) {
+        jacobian_matrix.col(column_index++) <<
+            1.0,
+            0.0,
+            0.0,
+            sqrt(10.0) * 2.0 * (x1 - x4) * (1.0 - x4);
+      }
+      if (col2) {
+        jacobian_matrix.col(column_index++) <<
+            10.0,
+            0.0,
+            2.0*(x2 - 2.0*x3)*(1.0 - 2.0*x3),
+            0.0;
+      }
+
+      if (col3) {
+        jacobian_matrix.col(column_index++) <<
+            0.0,
+            sqrt(5.0),
+            2.0*(x2 - 2.0*x3)*(x2 - 2.0),
+            0.0;
+      }
+
+      if (col4) {
+        jacobian_matrix.col(column_index++) <<
+            0.0,
+            -sqrt(5.0),
+            0.0,
+            sqrt(10.0) * 2.0 * (x1 - x4) * (x1 - 1.0);
+      }
+      VLOG(1) << "\n" << jacobian_matrix;
+    }
+
+    if (gradient != NULL) {
+      int column_index = 0;
+      if (col1) {
+        gradient[column_index++] = f1  + f4 * sqrt(10.0) * 2.0 * (x1 - x4);
+      }
+
+      if (col2) {
+        gradient[column_index++] = f1 * 10.0 + f3 * 2.0 * (x2 - 2.0 * x3);
+      }
+
+      if (col3) {
+        gradient[column_index++] =
+            f2 * sqrt(5.0) + f3 * (2.0 * 2.0 * (2.0 * x3 - x2));
+      }
+
+      if (col4) {
+        gradient[column_index++] =
+            -f2 * sqrt(5.0) + f4 * sqrt(10.0) * 2.0 * (x4 - x1);
+      }
+    }
+
+    return true;
+  }
+
+  virtual bool Plus(const double* state,
+                    const double* delta,
+                    double* state_plus_delta) const {
+    int delta_index = 0;
+    state_plus_delta[0] = (col1  ? state[0] + delta[delta_index++] : state[0]);
+    state_plus_delta[1] = (col2  ? state[1] + delta[delta_index++] : state[1]);
+    state_plus_delta[2] = (col3  ? state[2] + delta[delta_index++] : state[2]);
+    state_plus_delta[3] = (col4  ? state[3] + delta[delta_index++] : state[3]);
+    return true;
+  }
+
+  virtual int NumEffectiveParameters() const { return num_active_cols_; }
+  virtual int NumParameters()          const { return 4; }
+  virtual int NumResiduals()           const { return 4; }
+
+ private:
+  const int num_active_cols_;
+};
+
+// Templated function that holds a subset of the columns fixed and
+// checks whether the solver converges to the optimal values.
+template<bool col1, bool col2, bool col3, bool col4>
+void IsTrustRegionSolveSuccessful(TrustRegionStrategyType strategy_type) {
+  Solver::Options solver_options;
+  LinearSolver::Options linear_solver_options;
+  DenseQRSolver linear_solver(linear_solver_options);
+
+  double parameters[4] = { 3, -1, 0, 1.0 };
+
+  // If the column is inactive, then set its value to the optimal
+  // value.
+  parameters[0] = (col1 ? parameters[0] : 0.0);
+  parameters[1] = (col2 ? parameters[1] : 0.0);
+  parameters[2] = (col3 ? parameters[2] : 0.0);
+  parameters[3] = (col4 ? parameters[3] : 0.0);
+
+  Minimizer::Options minimizer_options(solver_options);
+  minimizer_options.gradient_tolerance = 1e-26;
+  minimizer_options.function_tolerance = 1e-26;
+  minimizer_options.parameter_tolerance = 1e-26;
+  minimizer_options.evaluator.reset(
+      new PowellEvaluator2<col1, col2, col3, col4>);
+  minimizer_options.jacobian.reset(
+      minimizer_options.evaluator->CreateJacobian());
+
+  TrustRegionStrategy::Options trust_region_strategy_options;
+  trust_region_strategy_options.trust_region_strategy_type = strategy_type;
+  trust_region_strategy_options.linear_solver = &linear_solver;
+  trust_region_strategy_options.initial_radius = 1e4;
+  trust_region_strategy_options.max_radius = 1e20;
+  trust_region_strategy_options.min_lm_diagonal = 1e-6;
+  trust_region_strategy_options.max_lm_diagonal = 1e32;
+  minimizer_options.trust_region_strategy.reset(
+      TrustRegionStrategy::Create(trust_region_strategy_options));
+
+  TrustRegionMinimizer minimizer;
+  Solver::Summary summary;
+  minimizer.Minimize(minimizer_options, parameters, &summary);
+
+  // The minimum is at x1 = x2 = x3 = x4 = 0.
+  EXPECT_NEAR(0.0, parameters[0], 0.001);
+  EXPECT_NEAR(0.0, parameters[1], 0.001);
+  EXPECT_NEAR(0.0, parameters[2], 0.001);
+  EXPECT_NEAR(0.0, parameters[3], 0.001);
+}
+
+TEST(TrustRegionMinimizer, PowellsSingularFunctionUsingLevenbergMarquardt) {
+  // The following case is excluded because the solver converges to a
+  // local minimum and does not find the global optimum. This should
+  // not affect the correctness of this test since we are testing all
+  // the other 14 combinations of column activations.
+  //
+  //   IsTrustRegionSolveSuccessful<true, true, false, true>(kStrategy);
+
+  const TrustRegionStrategyType kStrategy = LEVENBERG_MARQUARDT;
+  IsTrustRegionSolveSuccessful<true,  true,  true,  true >(kStrategy);
+  IsTrustRegionSolveSuccessful<true,  true,  true,  false>(kStrategy);
+  IsTrustRegionSolveSuccessful<true,  false, true,  true >(kStrategy);
+  IsTrustRegionSolveSuccessful<false, true,  true,  true >(kStrategy);
+  IsTrustRegionSolveSuccessful<true,  true,  false, false>(kStrategy);
+  IsTrustRegionSolveSuccessful<true,  false, true,  false>(kStrategy);
+  IsTrustRegionSolveSuccessful<false, true,  true,  false>(kStrategy);
+  IsTrustRegionSolveSuccessful<true,  false, false, true >(kStrategy);
+  IsTrustRegionSolveSuccessful<false, true,  false, true >(kStrategy);
+  IsTrustRegionSolveSuccessful<false, false, true,  true >(kStrategy);
+  IsTrustRegionSolveSuccessful<true,  false, false, false>(kStrategy);
+  IsTrustRegionSolveSuccessful<false, true,  false, false>(kStrategy);
+  IsTrustRegionSolveSuccessful<false, false, true,  false>(kStrategy);
+  IsTrustRegionSolveSuccessful<false, false, false, true >(kStrategy);
+}
+
+TEST(TrustRegionMinimizer, PowellsSingularFunctionUsingDogleg) {
+  // The following two cases are excluded because they encounter a
+  // local minimum.
+  //
+  //  IsTrustRegionSolveSuccessful<true, true, false, true >(kStrategy);
+  //  IsTrustRegionSolveSuccessful<true,  true,  true,  true >(kStrategy);
+
+  const TrustRegionStrategyType kStrategy = DOGLEG;
+  IsTrustRegionSolveSuccessful<true,  true,  true,  false>(kStrategy);
+  IsTrustRegionSolveSuccessful<true,  false, true,  true >(kStrategy);
+  IsTrustRegionSolveSuccessful<false, true,  true,  true >(kStrategy);
+  IsTrustRegionSolveSuccessful<true,  true,  false, false>(kStrategy);
+  IsTrustRegionSolveSuccessful<true,  false, true,  false>(kStrategy);
+  IsTrustRegionSolveSuccessful<false, true,  true,  false>(kStrategy);
+  IsTrustRegionSolveSuccessful<true,  false, false, true >(kStrategy);
+  IsTrustRegionSolveSuccessful<false, true,  false, true >(kStrategy);
+  IsTrustRegionSolveSuccessful<false, false, true,  true >(kStrategy);
+  IsTrustRegionSolveSuccessful<true,  false, false, false>(kStrategy);
+  IsTrustRegionSolveSuccessful<false, true,  false, false>(kStrategy);
+  IsTrustRegionSolveSuccessful<false, false, true,  false>(kStrategy);
+  IsTrustRegionSolveSuccessful<false, false, false, true >(kStrategy);
+}
+
+
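+// A cost function with a single residual measuring how far the perimeter
+// of the closed polygon defined by num_vertices 2D vertices is from
+// target_length:
+//
+//   residual = target_length - sum_i |p_i - p_{i-1}|
+//
+// Each vertex is a separate two-dimensional parameter block.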
+class CurveCostFunction : public CostFunction {
+ public:
+  CurveCostFunction(int num_vertices, double target_length)
+      : num_vertices_(num_vertices), target_length_(target_length) {
+    set_num_residuals(1);
+    for (int i = 0; i < num_vertices_; ++i) {
+      mutable_parameter_block_sizes()->push_back(2);
+    }
+  }
+
+  bool Evaluate(double const* const* parameters,
+                double* residuals,
+                double** jacobians) const {
+    residuals[0] = target_length_;
+
+    for (int i = 0; i < num_vertices_; ++i) {
+      int prev = (num_vertices_ + i - 1) % num_vertices_;
+      double length = 0.0;
+      for (int dim = 0; dim < 2; dim++) {
+        const double diff = parameters[prev][dim] - parameters[i][dim];
+        length += diff * diff;
+      }
+      residuals[0] -= sqrt(length);
+    }
+
+    if (jacobians == NULL) {
+      return true;
+    }
+
+    for (int i = 0; i < num_vertices_; ++i) {
+      if (jacobians[i] != NULL) {
+        int prev = (num_vertices_ + i - 1) % num_vertices_;
+        int next = (i + 1) % num_vertices_;
+
+        double u[2], v[2];
+        double norm_u = 0., norm_v = 0.;
+        for (int dim = 0; dim < 2; dim++) {
+          u[dim] = parameters[i][dim] - parameters[prev][dim];
+          norm_u += u[dim] * u[dim];
+          v[dim] = parameters[next][dim] - parameters[i][dim];
+          norm_v += v[dim] * v[dim];
+        }
+
+        norm_u = sqrt(norm_u);
+        norm_v = sqrt(norm_v);
+
+        for (int dim = 0; dim < 2; dim++) {
+          jacobians[i][dim] = 0.;
+
+          if (norm_u > std::numeric_limits< double >::min()) {
+            jacobians[i][dim] -= u[dim] / norm_u;
+          }
+
+          if (norm_v > std::numeric_limits< double >::min()) {
+            jacobians[i][dim] += v[dim] / norm_v;
+          }
+        }
+      }
+    }
+
+    return true;
+  }
+
+ private:
+  int     num_vertices_;
+  double  target_length_;
+};
+
+TEST(TrustRegionMinimizer, JacobiScalingTest) {
+  int N = 6;
+  std::vector<double*> y(N);
+  const double pi = 3.1415926535897932384626433;
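+  // Initialize the N vertices on the unit circle. The single residual
+  // drives the perimeter of the polygon they define towards 10.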
+  for (int i = 0; i < N; i++) {
+    double theta = i * 2. * pi/ static_cast< double >(N);
+    y[i] = new double[2];
+    y[i][0] = cos(theta);
+    y[i][1] = sin(theta);
+  }
+
+  Problem problem;
+  problem.AddResidualBlock(new CurveCostFunction(N, 10.), NULL, y);
+  Solver::Options options;
+  options.linear_solver_type = ceres::DENSE_QR;
+  Solver::Summary summary;
+  Solve(options, &problem, &summary);
+  EXPECT_LE(summary.final_cost, 1e-10);
+
+  for (int i = 0; i < N; i++) {
+    delete []y[i];
+  }
+}
+
+struct ExpCostFunctor {
+  template <typename T>
+  bool operator()(const T* const x, T* residual) const {
+    residual[0] = T(10.0) - exp(x[0]);
+    return true;
+  }
+
+  static CostFunction* Create() {
+    return new AutoDiffCostFunction<ExpCostFunctor, 1, 1>(
+        new ExpCostFunctor);
+  }
+};
+
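+// The unconstrained minimum of 0.5 * (10 - exp(x))^2 is at
+// x = log(10) ~= 2.3, which violates the lower bound x >= 3, so the
+// solver is expected to converge to the boundary x = 3.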
+TEST(TrustRegionMinimizer, GradientToleranceConvergenceUpdatesStep) {
+  double x = 5;
+  Problem problem;
+  problem.AddResidualBlock(ExpCostFunctor::Create(), NULL, &x);
+  problem.SetParameterLowerBound(&x, 0, 3.0);
+  Solver::Options options;
+  Solver::Summary summary;
+  Solve(options, &problem, &summary);
+  EXPECT_NEAR(3.0, x, 1e-12);
+  const double expected_final_cost = 0.5 * pow(10.0 - exp(3.0), 2);
+  EXPECT_NEAR(expected_final_cost, summary.final_cost, 1e-12);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/trust_region_preprocessor.cc b/internal/ceres/trust_region_preprocessor.cc
new file mode 100644
index 0000000..aa7f095
--- /dev/null
+++ b/internal/ceres/trust_region_preprocessor.cc
@@ -0,0 +1,385 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/trust_region_preprocessor.h"
+
+#include <numeric>
+#include <string>
+#include "ceres/callbacks.h"
+#include "ceres/context_impl.h"
+#include "ceres/evaluator.h"
+#include "ceres/linear_solver.h"
+#include "ceres/minimizer.h"
+#include "ceres/parameter_block.h"
+#include "ceres/preconditioner.h"
+#include "ceres/preprocessor.h"
+#include "ceres/problem_impl.h"
+#include "ceres/program.h"
+#include "ceres/reorder_program.h"
+#include "ceres/suitesparse.h"
+#include "ceres/trust_region_strategy.h"
+#include "ceres/wall_time.h"
+
+namespace ceres {
+namespace internal {
+
+using std::vector;
+
+namespace {
+
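+// Returns an ordering with all the parameter blocks in a single (the
+// zeroth) elimination group, which leaves Ceres free to choose the
+// elimination ordering.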
+ParameterBlockOrdering* CreateDefaultLinearSolverOrdering(
+    const Program& program) {
+  ParameterBlockOrdering* ordering = new ParameterBlockOrdering;
+  const vector<ParameterBlock*>& parameter_blocks =
+      program.parameter_blocks();
+  for (int i = 0; i < parameter_blocks.size(); ++i) {
+    ordering->AddElementToGroup(
+        const_cast<double*>(parameter_blocks[i]->user_state()), 0);
+  }
+  return ordering;
+}
+
+// Check that all the user-supplied values in the parameter blocks
+// are finite, and that the program is feasible.
+bool IsProgramValid(const Program& program, std::string* error) {
+  return (program.ParameterBlocksAreFinite(error) &&
+          program.IsFeasible(error));
+}
+
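+// The Schur type linear solver (and preconditioner) requested by the
+// user cannot be used when the first elimination group contains no
+// e_blocks, so switch to the equivalent non-Schur solver and
+// preconditioner.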
+void AlternateLinearSolverAndPreconditionerForSchurTypeLinearSolver(
+    Solver::Options* options) {
+  if (!IsSchurType(options->linear_solver_type)) {
+    return;
+  }
+
+  const LinearSolverType linear_solver_type_given = options->linear_solver_type;
+  const PreconditionerType preconditioner_type_given =
+      options->preconditioner_type;
+  options->linear_solver_type = LinearSolver::LinearSolverForZeroEBlocks(
+      linear_solver_type_given);
+
+  std::string message;
+  if (linear_solver_type_given == ITERATIVE_SCHUR) {
+    options->preconditioner_type = Preconditioner::PreconditionerForZeroEBlocks(
+        preconditioner_type_given);
+
+    message =
+        StringPrintf(
+            "No E blocks. Switching from %s(%s) to %s(%s).",
+            LinearSolverTypeToString(linear_solver_type_given),
+            PreconditionerTypeToString(preconditioner_type_given),
+            LinearSolverTypeToString(options->linear_solver_type),
+            PreconditionerTypeToString(options->preconditioner_type));
+  } else {
+    message =
+        StringPrintf(
+            "No E blocks. Switching from %s to %s.",
+            LinearSolverTypeToString(linear_solver_type_given),
+            LinearSolverTypeToString(options->linear_solver_type));
+  }
+
+  VLOG_IF(1, options->logging_type != SILENT) << message;
+}
+
+// For Schur type and SPARSE_NORMAL_CHOLESKY linear solvers, reorder
+// the program to reduce fill-in and increase cache coherency.
+bool ReorderProgram(PreprocessedProblem* pp) {
+  const Solver::Options& options = pp->options;
+  if (IsSchurType(options.linear_solver_type)) {
+    return ReorderProgramForSchurTypeLinearSolver(
+        options.linear_solver_type,
+        options.sparse_linear_algebra_library_type,
+        pp->problem->parameter_map(),
+        options.linear_solver_ordering.get(),
+        pp->reduced_program.get(),
+        &pp->error);
+  }
+
+  if (options.linear_solver_type == SPARSE_NORMAL_CHOLESKY &&
+      !options.dynamic_sparsity) {
+    return ReorderProgramForSparseNormalCholesky(
+        options.sparse_linear_algebra_library_type,
+        *options.linear_solver_ordering,
+        pp->reduced_program.get(),
+        &pp->error);
+  }
+
+  return true;
+}
+
+// Configure and create a linear solver object. In doing so, if a
+// sparse direct factorization based linear solver is being used, then
+// find a fill-reducing ordering and reorder the program as needed.
+bool SetupLinearSolver(PreprocessedProblem* pp) {
+  Solver::Options& options = pp->options;
+  if (options.linear_solver_ordering.get() == NULL) {
+    // If the user has not supplied a linear solver ordering, then we
+    // assume that we are free to choose the best possible ordering.
+    // This intent is indicated by putting all the parameter blocks in
+    // the same elimination group.
+    options.linear_solver_ordering.reset(
+        CreateDefaultLinearSolverOrdering(*pp->reduced_program));
+  } else {
+    // If the user supplied an ordering, then check if the first
+    // elimination group is still non-empty after the reduced problem
+    // has been constructed.
+    //
+    // This is important for Schur type linear solvers, where the
+    // first elimination group is special -- it needs to be an
+    // independent set.
+    //
+    // If the first elimination group is empty, then we cannot use
+    // the user's requested linear solver (and preconditioner, as the
+    // case may be), so we must use a different one.
+    ParameterBlockOrdering* ordering = options.linear_solver_ordering.get();
+    const int min_group_id = ordering->MinNonZeroGroup();
+    ordering->Remove(pp->removed_parameter_blocks);
+    if (IsSchurType(options.linear_solver_type) &&
+        min_group_id != ordering->MinNonZeroGroup()) {
+      AlternateLinearSolverAndPreconditionerForSchurTypeLinearSolver(
+          &options);
+    }
+  }
+
+  // Reorder the program to reduce fill in and improve cache coherency
+  // of the Jacobian.
+  if (!ReorderProgram(pp)) {
+    return false;
+  }
+
+  // Configure the linear solver.
+  pp->linear_solver_options = LinearSolver::Options();
+  pp->linear_solver_options.min_num_iterations =
+      options.min_linear_solver_iterations;
+  pp->linear_solver_options.max_num_iterations =
+      options.max_linear_solver_iterations;
+  pp->linear_solver_options.type = options.linear_solver_type;
+  pp->linear_solver_options.preconditioner_type = options.preconditioner_type;
+  pp->linear_solver_options.visibility_clustering_type =
+      options.visibility_clustering_type;
+  pp->linear_solver_options.sparse_linear_algebra_library_type =
+      options.sparse_linear_algebra_library_type;
+  pp->linear_solver_options.dense_linear_algebra_library_type =
+      options.dense_linear_algebra_library_type;
+  pp->linear_solver_options.use_explicit_schur_complement =
+      options.use_explicit_schur_complement;
+  pp->linear_solver_options.dynamic_sparsity = options.dynamic_sparsity;
+  pp->linear_solver_options.use_mixed_precision_solves =
+      options.use_mixed_precision_solves;
+  pp->linear_solver_options.max_num_refinement_iterations =
+      options.max_num_refinement_iterations;
+  pp->linear_solver_options.num_threads = options.num_threads;
+  pp->linear_solver_options.use_postordering = options.use_postordering;
+  pp->linear_solver_options.context = pp->problem->context();
+
+  if (IsSchurType(pp->linear_solver_options.type)) {
+    OrderingToGroupSizes(options.linear_solver_ordering.get(),
+                         &pp->linear_solver_options.elimination_groups);
+
+    // Schur type solvers expect at least two elimination groups. If
+    // there is only one elimination group, then it is guaranteed that
+    // this group only contains e_blocks. Thus we add a dummy
+    // elimination group with zero blocks in it.
+    if (pp->linear_solver_options.elimination_groups.size() == 1) {
+      pp->linear_solver_options.elimination_groups.push_back(0);
+    }
+
+    if (options.linear_solver_type == SPARSE_SCHUR) {
+      // When using SPARSE_SCHUR, we ignore the user's postordering
+      // preferences in certain cases.
+      //
+      // 1. SUITE_SPARSE is the sparse linear algebra library requested
+      //    but cholmod_camd is not available.
+      // 2. CX_SPARSE is the sparse linear algebra library requested.
+      //
+      // This ensures that the linear solver does not assume that a
+      // fill-reducing pre-ordering has been done.
+      //
+      // TODO(sameeragarwal): Implement the reordering of parameter
+      // blocks for CX_SPARSE.
+      if ((options.sparse_linear_algebra_library_type == SUITE_SPARSE &&
+           !SuiteSparse::
+           IsConstrainedApproximateMinimumDegreeOrderingAvailable()) ||
+          (options.sparse_linear_algebra_library_type == CX_SPARSE)) {
+        pp->linear_solver_options.use_postordering = true;
+      }
+    }
+  }
+
+  pp->linear_solver.reset(LinearSolver::Create(pp->linear_solver_options));
+  return (pp->linear_solver.get() != NULL);
+}
+
+// Configure and create the evaluator.
+bool SetupEvaluator(PreprocessedProblem* pp) {
+  const Solver::Options& options = pp->options;
+  pp->evaluator_options = Evaluator::Options();
+  pp->evaluator_options.linear_solver_type = options.linear_solver_type;
+  pp->evaluator_options.num_eliminate_blocks = 0;
+  if (IsSchurType(options.linear_solver_type)) {
+    pp->evaluator_options.num_eliminate_blocks =
+        options
+        .linear_solver_ordering
+        ->group_to_elements().begin()
+        ->second.size();
+  }
+
+  pp->evaluator_options.num_threads = options.num_threads;
+  pp->evaluator_options.dynamic_sparsity = options.dynamic_sparsity;
+  pp->evaluator_options.context = pp->problem->context();
+  pp->evaluator_options.evaluation_callback = options.evaluation_callback;
+  pp->evaluator.reset(Evaluator::Create(pp->evaluator_options,
+                                        pp->reduced_program.get(),
+                                        &pp->error));
+
+  return (pp->evaluator.get() != NULL);
+}
+
+// If the user requested inner iterations, then find an inner
+// iteration ordering as needed and configure and create a
+// CoordinateDescentMinimizer object to perform the inner iterations.
+bool SetupInnerIterationMinimizer(PreprocessedProblem* pp) {
+  Solver::Options& options = pp->options;
+  if (!options.use_inner_iterations) {
+    return true;
+  }
+
+  // With just one parameter block, the outer iteration of the trust
+  // region method and inner iterations are doing exactly the same
+  // thing, and thus inner iterations are not needed.
+  if (pp->reduced_program->NumParameterBlocks() == 1) {
+    LOG(WARNING) << "Reduced problem only contains one parameter block. "
+                 << "Disabling inner iterations.";
+    return true;
+  }
+
+  if (options.inner_iteration_ordering.get() != NULL) {
+    // If the user supplied an ordering, then remove the set of
+    // inactive parameter blocks from it
+    options.inner_iteration_ordering->Remove(pp->removed_parameter_blocks);
+    if (options.inner_iteration_ordering->NumElements() == 0) {
+      LOG(WARNING) << "No remaining elements in the inner iteration ordering.";
+      return true;
+    }
+
+    // Validate the reduced ordering.
+    if (!CoordinateDescentMinimizer::IsOrderingValid(
+            *pp->reduced_program,
+            *options.inner_iteration_ordering,
+            &pp->error)) {
+      return false;
+    }
+  } else {
+    // The user did not supply an ordering, so create one.
+    options.inner_iteration_ordering.reset(
+        CoordinateDescentMinimizer::CreateOrdering(*pp->reduced_program));
+  }
+
+  pp->inner_iteration_minimizer.reset(
+      new CoordinateDescentMinimizer(pp->problem->context()));
+  return pp->inner_iteration_minimizer->Init(*pp->reduced_program,
+                                             pp->problem->parameter_map(),
+                                             *options.inner_iteration_ordering,
+                                             &pp->error);
+}
+
+// Configure the minimizer options, including creating the Jacobian and
+// the trust region strategy used by the TrustRegionMinimizer.
+void SetupMinimizerOptions(PreprocessedProblem* pp) {
+  const Solver::Options& options = pp->options;
+
+  SetupCommonMinimizerOptions(pp);
+  pp->minimizer_options.is_constrained =
+      pp->reduced_program->IsBoundsConstrained();
+  pp->minimizer_options.jacobian.reset(pp->evaluator->CreateJacobian());
+  pp->minimizer_options.inner_iteration_minimizer =
+      pp->inner_iteration_minimizer;
+
+  TrustRegionStrategy::Options strategy_options;
+  strategy_options.linear_solver = pp->linear_solver.get();
+  strategy_options.initial_radius =
+      options.initial_trust_region_radius;
+  strategy_options.max_radius = options.max_trust_region_radius;
+  strategy_options.min_lm_diagonal = options.min_lm_diagonal;
+  strategy_options.max_lm_diagonal = options.max_lm_diagonal;
+  strategy_options.trust_region_strategy_type =
+      options.trust_region_strategy_type;
+  strategy_options.dogleg_type = options.dogleg_type;
+  pp->minimizer_options.trust_region_strategy.reset(
+      TrustRegionStrategy::Create(strategy_options));
+  CHECK(pp->minimizer_options.trust_region_strategy != nullptr);
+}
+
+}  // namespace
+
+TrustRegionPreprocessor::~TrustRegionPreprocessor() {
+}
+
+bool TrustRegionPreprocessor::Preprocess(const Solver::Options& options,
+                                         ProblemImpl* problem,
+                                         PreprocessedProblem* pp) {
+  CHECK(pp != nullptr);
+  pp->options = options;
+  ChangeNumThreadsIfNeeded(&pp->options);
+
+  pp->problem = problem;
+  Program* program = problem->mutable_program();
+  if (!IsProgramValid(*program, &pp->error)) {
+    return false;
+  }
+
+  pp->reduced_program.reset(
+      program->CreateReducedProgram(&pp->removed_parameter_blocks,
+                                    &pp->fixed_cost,
+                                    &pp->error));
+
+  if (pp->reduced_program.get() == NULL) {
+    return false;
+  }
+
+  if (pp->reduced_program->NumParameterBlocks() == 0) {
+    // The reduced problem has no parameter or residual blocks. There
+    // is nothing more to do.
+    return true;
+  }
+
+  if (!SetupLinearSolver(pp) ||
+      !SetupEvaluator(pp) ||
+      !SetupInnerIterationMinimizer(pp)) {
+    return false;
+  }
+
+  SetupMinimizerOptions(pp);
+  return true;
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/trust_region_preprocessor.h b/internal/ceres/trust_region_preprocessor.h
new file mode 100644
index 0000000..a6631ab
--- /dev/null
+++ b/internal/ceres/trust_region_preprocessor.h
@@ -0,0 +1,50 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_TRUST_REGION_PREPROCESSOR_H_
+#define CERES_INTERNAL_TRUST_REGION_PREPROCESSOR_H_
+
+#include "ceres/preprocessor.h"
+
+namespace ceres {
+namespace internal {
+
+class TrustRegionPreprocessor : public Preprocessor {
+ public:
+  virtual ~TrustRegionPreprocessor();
+  virtual bool Preprocess(const Solver::Options& options,
+                          ProblemImpl* problem,
+                          PreprocessedProblem* preprocessed_problem);
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_TRUST_REGION_PREPROCESSOR_H_
diff --git a/internal/ceres/trust_region_preprocessor_test.cc b/internal/ceres/trust_region_preprocessor_test.cc
new file mode 100644
index 0000000..40338c1
--- /dev/null
+++ b/internal/ceres/trust_region_preprocessor_test.cc
@@ -0,0 +1,356 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <array>
+#include <map>
+
+#include "ceres/ordered_groups.h"
+#include "ceres/problem_impl.h"
+#include "ceres/sized_cost_function.h"
+#include "ceres/solver.h"
+#include "ceres/trust_region_preprocessor.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+TEST(TrustRegionPreprocessor, ZeroProblem) {
+  ProblemImpl problem;
+  Solver::Options options;
+  TrustRegionPreprocessor preprocessor;
+  PreprocessedProblem pp;
+  EXPECT_TRUE(preprocessor.Preprocess(options, &problem, &pp));
+}
+
+TEST(TrustRegionPreprocessor, ProblemWithInvalidParameterBlock) {
+  ProblemImpl problem;
+  double x = std::numeric_limits<double>::quiet_NaN();
+  problem.AddParameterBlock(&x, 1);
+  Solver::Options options;
+  TrustRegionPreprocessor preprocessor;
+  PreprocessedProblem pp;
+  EXPECT_FALSE(preprocessor.Preprocess(options, &problem, &pp));
+}
+
+TEST(TrustRegionPreprocessor, ParameterBlockBoundsAreInvalid) {
+  ProblemImpl problem;
+  double x = 1.0;
+  problem.AddParameterBlock(&x, 1);
+  problem.SetParameterUpperBound(&x, 0, 1.0);
+  problem.SetParameterLowerBound(&x, 0, 2.0);
+  Solver::Options options;
+  TrustRegionPreprocessor preprocessor;
+  PreprocessedProblem pp;
+  EXPECT_FALSE(preprocessor.Preprocess(options, &problem, &pp));
+}
+
+TEST(TrustRegionPreprocessor, ParameterBlockIsInfeasible) {
+  ProblemImpl problem;
+  double x = 3.0;
+  problem.AddParameterBlock(&x, 1);
+  problem.SetParameterUpperBound(&x, 0, 1.0);
+  problem.SetParameterLowerBound(&x, 0, 2.0);
+  problem.SetParameterBlockConstant(&x);
+  Solver::Options options;
+  TrustRegionPreprocessor preprocessor;
+  PreprocessedProblem pp;
+  EXPECT_FALSE(preprocessor.Preprocess(options, &problem, &pp));
+}
+
+class FailingCostFunction : public SizedCostFunction<1, 1> {
+ public:
+  bool Evaluate(double const* const* parameters,
+                double* residuals,
+                double** jacobians) const {
+    return false;
+  }
+};
+
+TEST(TrustRegionPreprocessor, RemoveParameterBlocksFailed) {
+  ProblemImpl problem;
+  double x = 3.0;
+  problem.AddResidualBlock(new FailingCostFunction, nullptr, &x);
+  problem.SetParameterBlockConstant(&x);
+  Solver::Options options;
+  TrustRegionPreprocessor preprocessor;
+  PreprocessedProblem pp;
+  EXPECT_FALSE(preprocessor.Preprocess(options, &problem, &pp));
+}
+
+TEST(TrustRegionPreprocessor, RemoveParameterBlocksSucceeds) {
+  ProblemImpl problem;
+  double x = 3.0;
+  problem.AddParameterBlock(&x, 1);
+  Solver::Options options;
+  TrustRegionPreprocessor preprocessor;
+  PreprocessedProblem pp;
+  EXPECT_TRUE(preprocessor.Preprocess(options, &problem, &pp));
+}
+
+template <int kNumResiduals, int... Ns>
+class DummyCostFunction : public SizedCostFunction<kNumResiduals, Ns...> {
+ public:
+  bool Evaluate(double const* const* parameters,
+                double* residuals,
+                double** jacobians) const {
+    for (int i = 0; i < kNumResiduals; ++i) {
+      residuals[i] = kNumResiduals * kNumResiduals + i;
+    }
+
+    if (jacobians == nullptr) {
+      return true;
+    }
+
+    std::array<int, sizeof...(Ns)> N{Ns...};
+    for (size_t i = 0; i < N.size(); ++i) {
+      if (jacobians[i] != nullptr) {
+        MatrixRef j(jacobians[i], kNumResiduals, N[i]);
+        j.setOnes();
+        j *= kNumResiduals * N[i];
+      }
+    }
+
+    return true;
+  }
+};
+
+class LinearSolverAndEvaluatorCreationTest : public ::testing::Test {
+ public:
+  virtual void SetUp() {
+    x_ = 1.0;
+    y_ = 1.0;
+    z_ = 1.0;
+    problem_.AddResidualBlock(new DummyCostFunction<1, 1, 1>, nullptr, &x_, &y_);
+    problem_.AddResidualBlock(new DummyCostFunction<1, 1, 1>, nullptr, &y_, &z_);
+  }
+
+  void PreprocessForGivenLinearSolverAndVerify(
+      const LinearSolverType linear_solver_type) {
+    Solver::Options options;
+    options.linear_solver_type = linear_solver_type;
+    TrustRegionPreprocessor preprocessor;
+    PreprocessedProblem pp;
+    EXPECT_TRUE(preprocessor.Preprocess(options, &problem_, &pp));
+    EXPECT_EQ(pp.options.linear_solver_type, linear_solver_type);
+    EXPECT_EQ(pp.linear_solver_options.type, linear_solver_type);
+    EXPECT_EQ(pp.evaluator_options.linear_solver_type, linear_solver_type);
+    EXPECT_TRUE(pp.linear_solver.get() != nullptr);
+    EXPECT_TRUE(pp.evaluator.get() != nullptr);
+  }
+
+ protected:
+  ProblemImpl problem_;
+  double x_;
+  double y_;
+  double z_;
+};
+
+TEST_F(LinearSolverAndEvaluatorCreationTest, DenseQR) {
+  PreprocessForGivenLinearSolverAndVerify(DENSE_QR);
+}
+
+TEST_F(LinearSolverAndEvaluatorCreationTest, DenseNormalCholesky) {
+  PreprocessForGivenLinearSolverAndVerify(DENSE_NORMAL_CHOLESKY);
+}
+
+TEST_F(LinearSolverAndEvaluatorCreationTest, DenseSchur) {
+  PreprocessForGivenLinearSolverAndVerify(DENSE_SCHUR);
+}
+
+#if !defined(CERES_NO_SPARSE)
+TEST_F(LinearSolverAndEvaluatorCreationTest, SparseNormalCholesky) {
+  PreprocessForGivenLinearSolverAndVerify(SPARSE_NORMAL_CHOLESKY);
+}
+#endif
+
+#if !defined(CERES_NO_SPARSE)
+TEST_F(LinearSolverAndEvaluatorCreationTest, SparseSchur) {
+  PreprocessForGivenLinearSolverAndVerify(SPARSE_SCHUR);
+}
+#endif
+
+TEST_F(LinearSolverAndEvaluatorCreationTest, CGNR) {
+  PreprocessForGivenLinearSolverAndVerify(CGNR);
+}
+
+TEST_F(LinearSolverAndEvaluatorCreationTest, IterativeSchur) {
+  PreprocessForGivenLinearSolverAndVerify(ITERATIVE_SCHUR);
+}
+
+TEST_F(LinearSolverAndEvaluatorCreationTest, MinimizerIsAwareOfBounds) {
+  problem_.SetParameterLowerBound(&x_, 0, 0.0);
+  Solver::Options options;
+  TrustRegionPreprocessor preprocessor;
+  PreprocessedProblem pp;
+  EXPECT_TRUE(preprocessor.Preprocess(options, &problem_, &pp));
+  EXPECT_EQ(pp.options.linear_solver_type, options.linear_solver_type);
+  EXPECT_EQ(pp.linear_solver_options.type, options.linear_solver_type);
+  EXPECT_EQ(pp.evaluator_options.linear_solver_type,
+            options.linear_solver_type);
+  EXPECT_TRUE(pp.linear_solver.get() != nullptr);
+  EXPECT_TRUE(pp.evaluator.get() != nullptr);
+  EXPECT_TRUE(pp.minimizer_options.is_constrained);
+}
+
+TEST_F(LinearSolverAndEvaluatorCreationTest, SchurTypeSolverWithBadOrdering) {
+  Solver::Options options;
+  options.linear_solver_type = DENSE_SCHUR;
+  options.linear_solver_ordering.reset(new ParameterBlockOrdering);
+  options.linear_solver_ordering->AddElementToGroup(&x_, 0);
+  options.linear_solver_ordering->AddElementToGroup(&y_, 0);
+  options.linear_solver_ordering->AddElementToGroup(&z_, 1);
+
+  TrustRegionPreprocessor preprocessor;
+  PreprocessedProblem pp;
+  EXPECT_FALSE(preprocessor.Preprocess(options, &problem_, &pp));
+}
+
+TEST_F(LinearSolverAndEvaluatorCreationTest, SchurTypeSolverWithGoodOrdering) {
+  Solver::Options options;
+  options.linear_solver_type = DENSE_SCHUR;
+  options.linear_solver_ordering.reset(new ParameterBlockOrdering);
+  options.linear_solver_ordering->AddElementToGroup(&x_, 0);
+  options.linear_solver_ordering->AddElementToGroup(&z_, 0);
+  options.linear_solver_ordering->AddElementToGroup(&y_, 1);
+
+  TrustRegionPreprocessor preprocessor;
+  PreprocessedProblem pp;
+  EXPECT_TRUE(preprocessor.Preprocess(options, &problem_, &pp));
+  EXPECT_EQ(pp.options.linear_solver_type, DENSE_SCHUR);
+  EXPECT_EQ(pp.linear_solver_options.type, DENSE_SCHUR);
+  EXPECT_EQ(pp.evaluator_options.linear_solver_type, DENSE_SCHUR);
+  EXPECT_TRUE(pp.linear_solver.get() != nullptr);
+  EXPECT_TRUE(pp.evaluator.get() != nullptr);
+}
+
+TEST_F(LinearSolverAndEvaluatorCreationTest,
+       SchurTypeSolverWithEmptyFirstEliminationGroup) {
+  problem_.SetParameterBlockConstant(&x_);
+  problem_.SetParameterBlockConstant(&z_);
+
+  Solver::Options options;
+  options.linear_solver_type = DENSE_SCHUR;
+  options.linear_solver_ordering.reset(new ParameterBlockOrdering);
+  options.linear_solver_ordering->AddElementToGroup(&x_, 0);
+  options.linear_solver_ordering->AddElementToGroup(&z_, 0);
+  options.linear_solver_ordering->AddElementToGroup(&y_, 1);
+
+  TrustRegionPreprocessor preprocessor;
+  PreprocessedProblem pp;
+  EXPECT_TRUE(preprocessor.Preprocess(options, &problem_, &pp));
+  EXPECT_EQ(pp.options.linear_solver_type, DENSE_QR);
+  EXPECT_EQ(pp.linear_solver_options.type, DENSE_QR);
+  EXPECT_EQ(pp.evaluator_options.linear_solver_type, DENSE_QR);
+  EXPECT_TRUE(pp.linear_solver.get() != nullptr);
+  EXPECT_TRUE(pp.evaluator.get() != nullptr);
+}
+
+TEST_F(LinearSolverAndEvaluatorCreationTest,
+       SchurTypeSolverWithEmptySecondEliminationGroup) {
+  problem_.SetParameterBlockConstant(&y_);
+
+  Solver::Options options;
+  options.linear_solver_type = DENSE_SCHUR;
+  options.linear_solver_ordering.reset(new ParameterBlockOrdering);
+  options.linear_solver_ordering->AddElementToGroup(&x_, 0);
+  options.linear_solver_ordering->AddElementToGroup(&z_, 0);
+  options.linear_solver_ordering->AddElementToGroup(&y_, 1);
+
+  TrustRegionPreprocessor preprocessor;
+  PreprocessedProblem pp;
+  EXPECT_TRUE(preprocessor.Preprocess(options, &problem_, &pp));
+  EXPECT_EQ(pp.options.linear_solver_type, DENSE_SCHUR);
+  EXPECT_EQ(pp.linear_solver_options.type, DENSE_SCHUR);
+  EXPECT_EQ(pp.evaluator_options.linear_solver_type, DENSE_SCHUR);
+  EXPECT_TRUE(pp.linear_solver.get() != nullptr);
+  EXPECT_TRUE(pp.evaluator.get() != nullptr);
+}
+
+TEST(TrustRegionPreprocessorTest, InnerIterationsWithOneParameterBlock) {
+  ProblemImpl problem;
+  double x = 1.0;
+  problem.AddResidualBlock(new DummyCostFunction<1, 1>, nullptr, &x);
+
+  Solver::Options options;
+  options.use_inner_iterations = true;
+
+  TrustRegionPreprocessor preprocessor;
+  PreprocessedProblem pp;
+  EXPECT_TRUE(preprocessor.Preprocess(options, &problem, &pp));
+  EXPECT_TRUE(pp.linear_solver.get() != nullptr);
+  EXPECT_TRUE(pp.evaluator.get() != nullptr);
+  EXPECT_TRUE(pp.inner_iteration_minimizer.get() == nullptr);
+}
+
+TEST_F(LinearSolverAndEvaluatorCreationTest,
+       InnerIterationsWithTwoParameterBlocks) {
+  Solver::Options options;
+  options.use_inner_iterations = true;
+
+  TrustRegionPreprocessor preprocessor;
+  PreprocessedProblem pp;
+  EXPECT_TRUE(preprocessor.Preprocess(options, &problem_, &pp));
+  EXPECT_TRUE(pp.linear_solver.get() != nullptr);
+  EXPECT_TRUE(pp.evaluator.get() != nullptr);
+  EXPECT_TRUE(pp.inner_iteration_minimizer.get() != nullptr);
+}
+
+TEST_F(LinearSolverAndEvaluatorCreationTest,
+       InvalidInnerIterationsOrdering) {
+  Solver::Options options;
+  options.use_inner_iterations = true;
+  options.inner_iteration_ordering.reset(new ParameterBlockOrdering);
+  options.inner_iteration_ordering->AddElementToGroup(&x_, 0);
+  options.inner_iteration_ordering->AddElementToGroup(&z_, 0);
+  options.inner_iteration_ordering->AddElementToGroup(&y_, 0);
+
+  TrustRegionPreprocessor preprocessor;
+  PreprocessedProblem pp;
+  EXPECT_FALSE(preprocessor.Preprocess(options, &problem_, &pp));
+}
+
+TEST_F(LinearSolverAndEvaluatorCreationTest, ValidInnerIterationsOrdering) {
+  Solver::Options options;
+  options.use_inner_iterations = true;
+  options.inner_iteration_ordering.reset(new ParameterBlockOrdering);
+  options.inner_iteration_ordering->AddElementToGroup(&x_, 0);
+  options.inner_iteration_ordering->AddElementToGroup(&z_, 0);
+  options.inner_iteration_ordering->AddElementToGroup(&y_, 1);
+
+  TrustRegionPreprocessor preprocessor;
+  PreprocessedProblem pp;
+  EXPECT_TRUE(preprocessor.Preprocess(options, &problem_, &pp));
+  EXPECT_TRUE(pp.linear_solver.get() != nullptr);
+  EXPECT_TRUE(pp.evaluator.get() != nullptr);
+  EXPECT_TRUE(pp.inner_iteration_minimizer.get() != nullptr);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/trust_region_step_evaluator.cc b/internal/ceres/trust_region_step_evaluator.cc
new file mode 100644
index 0000000..33b0c41
--- /dev/null
+++ b/internal/ceres/trust_region_step_evaluator.cc
@@ -0,0 +1,117 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2016 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <algorithm>
+#include <limits>
+#include "ceres/trust_region_step_evaluator.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+TrustRegionStepEvaluator::TrustRegionStepEvaluator(
+    const double initial_cost,
+    const int max_consecutive_nonmonotonic_steps)
+    : max_consecutive_nonmonotonic_steps_(max_consecutive_nonmonotonic_steps),
+      minimum_cost_(initial_cost),
+      current_cost_(initial_cost),
+      reference_cost_(initial_cost),
+      candidate_cost_(initial_cost),
+      accumulated_reference_model_cost_change_(0.0),
+      accumulated_candidate_model_cost_change_(0.0),
+      num_consecutive_nonmonotonic_steps_(0) {
+}
+
+double TrustRegionStepEvaluator::StepQuality(
+    const double cost,
+    const double model_cost_change) const {
+  // If the function evaluation for this step was a failure, the
+  // TrustRegionMinimizer will have set the cost to
+  // std::numeric_limits<double>::max(). In that case the division by
+  // model_cost_change can result in an overflow. To prevent that from
+  // happening, we deal with this case explicitly.
+  if (cost >= std::numeric_limits<double>::max()) {
+    return std::numeric_limits<double>::lowest();
+  }
+
+  const double relative_decrease = (current_cost_ - cost) / model_cost_change;
+  const double historical_relative_decrease =
+      (reference_cost_ - cost) /
+      (accumulated_reference_model_cost_change_ + model_cost_change);
+  return std::max(relative_decrease, historical_relative_decrease);
+}
+
+void TrustRegionStepEvaluator::StepAccepted(
+    const double cost,
+    const double model_cost_change) {
+  // Algorithm 10.1.2 from Trust Region Methods by Conn, Gould &
+  // Toint.
+  //
+  // Step 3a
+  current_cost_ = cost;
+  accumulated_candidate_model_cost_change_ += model_cost_change;
+  accumulated_reference_model_cost_change_ += model_cost_change;
+
+  // Step 3b.
+  if (current_cost_ < minimum_cost_) {
+    minimum_cost_ = current_cost_;
+    num_consecutive_nonmonotonic_steps_ = 0;
+    candidate_cost_ = current_cost_;
+    accumulated_candidate_model_cost_change_ = 0.0;
+  } else {
+    // Step 3c.
+    ++num_consecutive_nonmonotonic_steps_;
+    if (current_cost_ > candidate_cost_) {
+      candidate_cost_ = current_cost_;
+      accumulated_candidate_model_cost_change_ = 0.0;
+    }
+  }
+
+  // Step 3d.
+  //
+  // At this point we have made too many non-monotonic steps and
+  // we are going to reset the value of the reference iterate so
+  // as to force the algorithm to descend.
+  //
+  // Note: In the original algorithm by Toint, this step was only
+  // executed if the step was non-monotonic, but that would not handle
+  // the case of max_consecutive_nonmonotonic_steps = 0. The small
+  // modification of doing this always handles that corner case
+  // correctly.
+  if (num_consecutive_nonmonotonic_steps_ ==
+      max_consecutive_nonmonotonic_steps_) {
+    reference_cost_ = candidate_cost_;
+    accumulated_reference_model_cost_change_ =
+        accumulated_candidate_model_cost_change_;
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
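
As a quick illustration of what StepQuality computes above, the sketch below (a standalone toy, not part of this patch) plugs concrete numbers into the two ratios: the classic relative decrease against the current iterate, and the historical one measured against the reference iterate. The numbers are made up; the point is that a step which slightly increases the current cost can still receive a good quality score, which is exactly the non-monotonic behaviour the evaluator is designed for.

#include <algorithm>
#include <cstdio>

// Standalone illustration: mirrors the two ratios combined by
// TrustRegionStepEvaluator::StepQuality above.
int main() {
  // Hypothetical state after a few accepted steps.
  const double current_cost = 10.0;    // cost at the current iterate
  const double reference_cost = 16.0;  // cost at the reference iterate
  const double accumulated_reference_model_cost_change = 8.0;

  // A candidate step that slightly increases the current cost.
  const double cost = 10.5;
  const double model_cost_change = 2.0;

  const double relative_decrease = (current_cost - cost) / model_cost_change;
  const double historical_relative_decrease =
      (reference_cost - cost) /
      (accumulated_reference_model_cost_change + model_cost_change);
  const double quality =
      std::max(relative_decrease, historical_relative_decrease);

  // relative_decrease = -0.25, historical_relative_decrease = 0.55,
  // so the non-monotonic evaluator reports a quality of 0.55 and a
  // typical acceptance threshold (e.g. 1e-3) would accept the step.
  std::printf("classic = %f, historical = %f, quality = %f\n",
              relative_decrease, historical_relative_decrease, quality);
  return 0;
}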
diff --git a/internal/ceres/trust_region_step_evaluator.h b/internal/ceres/trust_region_step_evaluator.h
new file mode 100644
index 0000000..03c0036
--- /dev/null
+++ b/internal/ceres/trust_region_step_evaluator.h
@@ -0,0 +1,122 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2016 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_TRUST_REGION_STEP_EVALUATOR_H_
+#define CERES_INTERNAL_TRUST_REGION_STEP_EVALUATOR_H_
+
+namespace ceres {
+namespace internal {
+
+// The job of the TrustRegionStepEvaluator is to evaluate the quality
+// of a step, i.e., how the cost of a step compares with the reduction
+// in the objective of the trust region problem.
+//
+// Classic trust region methods are descent methods, in that they only
+// accept a point if it strictly reduces the value of the objective
+// function. They do this by measuring the quality of a step as
+//
+//   cost_change / model_cost_change.
+//
+// Relaxing the monotonic descent requirement allows the algorithm to
+// be more efficient in the long term at the cost of some local
+// increase in the value of the objective function.
+//
+// This is because allowing for non-decreasing objective function
+// values in a principled manner allows the algorithm to "jump over
+// boulders" as the method is not restricted to move into narrow
+// valleys while preserving its convergence properties.
+//
+// The parameter max_consecutive_nonmonotonic_steps controls the
+// window size used by the step selection algorithm to accept
+// non-monotonic steps. Setting this parameter to zero recovers the
+// classic monotonic descent algorithm.
+//
+// Based on algorithm 10.1.2 (page 357) of "Trust Region
+// Methods" by Conn Gould & Toint, or equations 33-40 of
+// "Non-monotone trust-region algorithms for nonlinear
+// optimization subject to convex constraints" by Phil Toint,
+// Mathematical Programming, 77, 1997.
+//
+// Example usage:
+//
+// TrustRegionStepEvaluator* step_evaluator = ...
+//
+// cost = ... // Compute the non-linear objective function value.
+// model_cost_change = ... // Change in the value of the trust region objective.
+// if (step_evaluator->StepQuality(cost, model_cost_change) > threshold) {
+//   x = x + delta;
+//   step_evaluator->StepAccepted(cost, model_cost_change);
+// }
+class TrustRegionStepEvaluator {
+ public:
+  // initial_cost is, as the name implies, the cost of the starting
+  // state of the trust region minimizer.
+  //
+  // max_consecutive_nonmonotonic_steps controls the window size used
+  // by the step selection algorithm to accept non-monotonic
+  // steps. Setting this parameter to zero recovers the classic
+  // monotonic descent algorithm.
+  TrustRegionStepEvaluator(double initial_cost,
+                           int max_consecutive_nonmonotonic_steps);
+
+  // Return the quality of the step given its cost and the decrease in
+  // the cost of the model. model_cost_change has to be positive.
+  double StepQuality(double cost, double model_cost_change) const;
+
+  // Inform the step evaluator that a step with the given cost and
+  // model_cost_change has been accepted by the trust region
+  // minimizer.
+  void StepAccepted(double cost, double model_cost_change);
+
+ private:
+  const int max_consecutive_nonmonotonic_steps_;
+  // The minimum cost encountered up till now.
+  double minimum_cost_;
+  // The current cost of the trust region minimizer as informed by the
+  // last call to StepAccepted.
+  double current_cost_;
+  double reference_cost_;
+  double candidate_cost_;
+  // Accumulated model cost change since the last time the reference model
+  // cost was updated, i.e., when a step with cost less than the
+  // current known minimum cost is accepted.
+  double accumulated_reference_model_cost_change_;
+  // Accumulated model cost change since the last time the candidate model
+  // cost was updated, i.e., a non-monotonic step was taken with a
+  // cost that was greater than the current candidate cost.
+  double accumulated_candidate_model_cost_change_;
+  // Number of steps taken since the last time minimum_cost was updated.
+  int num_consecutive_nonmonotonic_steps_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_TRUST_REGION_STEP_EVALUATOR_H_
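
To make the "Example usage" comment in this header concrete, here is a minimal toy driver. It is a sketch only: it assumes it is compiled and linked inside the Ceres internal tree (the header above is not public API), and EvaluateCost, the candidate step rule, and the acceptance threshold kMinStepQuality are illustrative stand-ins, not anything defined by Ceres.

#include <cstdio>
#include "ceres/trust_region_step_evaluator.h"

namespace {
// Toy 1-D objective with its minimum at x = 3.
double EvaluateCost(double x) { return 0.5 * (x - 3.0) * (x - 3.0); }
}  // namespace

int main() {
  double x = 0.0;
  double current_cost = EvaluateCost(x);
  ceres::internal::TrustRegionStepEvaluator step_evaluator(
      current_cost, /* max_consecutive_nonmonotonic_steps = */ 5);

  const double kMinStepQuality = 1e-3;  // illustrative threshold
  for (int i = 0; i < 10; ++i) {
    const double delta = 0.5 * (3.0 - x);        // toy candidate step
    const double cost = EvaluateCost(x + delta);
    const double model_cost_change = current_cost - cost;  // toy model decrease
    if (step_evaluator.StepQuality(cost, model_cost_change) >
        kMinStepQuality) {
      x += delta;
      current_cost = cost;
      step_evaluator.StepAccepted(cost, model_cost_change);
    }
    std::printf("iter %d: x = %f cost = %f\n", i, x, current_cost);
  }
  return 0;
}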
diff --git a/internal/ceres/trust_region_strategy.cc b/internal/ceres/trust_region_strategy.cc
new file mode 100644
index 0000000..2db6a6c
--- /dev/null
+++ b/internal/ceres/trust_region_strategy.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//         keir@google.com (Keir Mierle)
+
+#include "ceres/trust_region_strategy.h"
+#include "ceres/dogleg_strategy.h"
+#include "ceres/levenberg_marquardt_strategy.h"
+
+namespace ceres {
+namespace internal {
+
+TrustRegionStrategy::~TrustRegionStrategy() {}
+
+TrustRegionStrategy* TrustRegionStrategy::Create(const Options& options) {
+  switch (options.trust_region_strategy_type) {
+    case LEVENBERG_MARQUARDT:
+      return new LevenbergMarquardtStrategy(options);
+    case DOGLEG:
+      return new DoglegStrategy(options);
+    default:
+      LOG(FATAL) << "Unknown trust region strategy: "
+                 << options.trust_region_strategy_type;
+  }
+
+  LOG(FATAL) << "Unknown trust region strategy: "
+             << options.trust_region_strategy_type;
+  return NULL;
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/trust_region_strategy.h b/internal/ceres/trust_region_strategy.h
new file mode 100644
index 0000000..b3b2e5d
--- /dev/null
+++ b/internal/ceres/trust_region_strategy.h
@@ -0,0 +1,144 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_TRUST_REGION_STRATEGY_H_
+#define CERES_INTERNAL_TRUST_REGION_STRATEGY_H_
+
+#include <string>
+#include "ceres/internal/port.h"
+#include "ceres/linear_solver.h"
+
+namespace ceres {
+namespace internal {
+
+class LinearSolver;
+class SparseMatrix;
+
+// Interface for classes implementing various trust region strategies
+// for nonlinear least squares problems.
+//
+// The object is expected to maintain and update a trust region
+// radius, which it then uses to solve for the trust region step using
+// the jacobian matrix and residual vector.
+//
+// Here the term trust region radius is used loosely, as the strategy
+// is free to treat it as guidance and violate it as need be. For example,
+// the LevenbergMarquardtStrategy uses the inverse of the trust region
+// radius to scale the damping term, which controls the step size, but
+// does not set a hard limit on its size.
+class TrustRegionStrategy {
+ public:
+  struct Options {
+    TrustRegionStrategyType trust_region_strategy_type = LEVENBERG_MARQUARDT;
+    // Linear solver used for actually solving the trust region step.
+    LinearSolver* linear_solver = nullptr;
+    double initial_radius = 1e4;
+    double max_radius = 1e32;
+
+    // Minimum and maximum values of the diagonal damping matrix used
+    // by LevenbergMarquardtStrategy. The DoglegStrategy also uses
+    // these bounds to construct a regularizing diagonal to ensure
+    // that the Gauss-Newton step computation is of full rank.
+    double min_lm_diagonal = 1e-6;
+    double max_lm_diagonal = 1e32;
+
+    // Further specify which dogleg method to use
+    DoglegType dogleg_type = TRADITIONAL_DOGLEG;
+  };
+
+  // Per solve options.
+  struct PerSolveOptions {
+    // Forcing sequence for inexact solves.
+    double eta = 1e-1;
+
+    DumpFormatType dump_format_type = TEXTFILE;
+
+    // If non-empty and dump_format_type is not CONSOLE, the trust
+    // region strategy will write the linear system to file(s) with
+    // name starting with dump_filename_base.  If dump_format_type is
+    // CONSOLE then dump_filename_base will be ignored and the linear
+    // system will be written to the standard error.
+    std::string dump_filename_base;
+  };
+
+  struct Summary {
+    // If the trust region problem is
+    //
+    //   1/2 x'Ax + b'x + c,
+    //
+    // then
+    //
+    //   residual_norm = |Ax - b|.
+    double residual_norm = -1;
+
+    // Number of iterations used by the linear solver. If a linear
+    // solver was not called (e.g., DoglegStrategy after an
+    // unsuccessful step), then this would be zero.
+    int num_iterations = -1;
+
+    // Status of the linear solver used to solve the Newton system.
+    LinearSolverTerminationType termination_type = LINEAR_SOLVER_FAILURE;
+  };
+
+  virtual ~TrustRegionStrategy();
+
+  // Use the current radius to solve for the trust region step.
+  virtual Summary ComputeStep(const PerSolveOptions& per_solve_options,
+                              SparseMatrix* jacobian,
+                              const double* residuals,
+                              double* step) = 0;
+
+  // Inform the strategy that the current step has been accepted, and
+  // that the ratio of the decrease in the non-linear objective to the
+  // decrease in the trust region model is step_quality.
+  virtual void StepAccepted(double step_quality) = 0;
+
+  // Inform the strategy that the current step has been rejected, and
+  // that the ratio of the decrease in the non-linear objective to the
+  // decrease in the trust region model is step_quality.
+  virtual void StepRejected(double step_quality) = 0;
+
+  // Inform the strategy that the current step has been rejected
+  // because it was found to be numerically invalid.
+  // StepRejected/StepAccepted will not be called for this step, and
+  // the strategy is free to do what it wants with this information.
+  virtual void StepIsInvalid() = 0;
+
+  // Current trust region radius.
+  virtual double Radius() const = 0;
+
+  // Factory.
+  static TrustRegionStrategy* Create(const Options& options);
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_TRUST_REGION_STRATEGY_H_
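
The interface above fixes the protocol between the minimizer and a strategy: ComputeStep proposes a step at the current Radius, and the minimizer then reports back through StepAccepted, StepRejected, or StepIsInvalid. The standalone toy below follows that protocol with a generic, textbook-style radius update; it is only a sketch and is not how LevenbergMarquardtStrategy or DoglegStrategy actually update their internal state.

#include <algorithm>
#include <cstdio>

// Standalone sketch of a radius policy that obeys the
// StepAccepted / StepRejected contract of the interface above.
class ToyRadiusPolicy {
 public:
  ToyRadiusPolicy(double initial_radius, double max_radius)
      : radius_(initial_radius), max_radius_(max_radius) {}

  // Good steps grow the trust region, mediocre ones leave it alone.
  void StepAccepted(double step_quality) {
    if (step_quality > 0.75) {
      radius_ = std::min(2.0 * radius_, max_radius_);
    }
  }

  // Rejected steps shrink the region so the model is trusted less.
  void StepRejected(double /* step_quality */) { radius_ *= 0.5; }

  double Radius() const { return radius_; }

 private:
  double radius_;
  double max_radius_;
};

int main() {
  ToyRadiusPolicy policy(/* initial_radius = */ 1e4, /* max_radius = */ 1e32);
  policy.StepRejected(0.0);  // radius shrinks to 5e3
  policy.StepAccepted(0.9);  // radius grows back to 1e4
  std::printf("radius = %e\n", policy.Radius());
  return 0;
}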
diff --git a/internal/ceres/types.cc b/internal/ceres/types.cc
new file mode 100644
index 0000000..932ec7d
--- /dev/null
+++ b/internal/ceres/types.cc
@@ -0,0 +1,403 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <algorithm>
+#include <cctype>
+#include <string>
+#include "ceres/types.h"
+#include "glog/logging.h"
+
+namespace ceres {
+
+using std::string;
+
+#define CASESTR(x) case x: return #x
+#define STRENUM(x) if (value == #x) { *type = x; return true;}
+
+static void UpperCase(string* input) {
+  std::transform(input->begin(), input->end(), input->begin(), ::toupper);
+}
+
+const char* LinearSolverTypeToString(LinearSolverType type) {
+  switch (type) {
+    CASESTR(DENSE_NORMAL_CHOLESKY);
+    CASESTR(DENSE_QR);
+    CASESTR(SPARSE_NORMAL_CHOLESKY);
+    CASESTR(DENSE_SCHUR);
+    CASESTR(SPARSE_SCHUR);
+    CASESTR(ITERATIVE_SCHUR);
+    CASESTR(CGNR);
+    default:
+      return "UNKNOWN";
+  }
+}
+
+bool StringToLinearSolverType(string value, LinearSolverType* type) {
+  UpperCase(&value);
+  STRENUM(DENSE_NORMAL_CHOLESKY);
+  STRENUM(DENSE_QR);
+  STRENUM(SPARSE_NORMAL_CHOLESKY);
+  STRENUM(DENSE_SCHUR);
+  STRENUM(SPARSE_SCHUR);
+  STRENUM(ITERATIVE_SCHUR);
+  STRENUM(CGNR);
+  return false;
+}
+
+const char* PreconditionerTypeToString(PreconditionerType type) {
+  switch (type) {
+    CASESTR(IDENTITY);
+    CASESTR(JACOBI);
+    CASESTR(SCHUR_JACOBI);
+    CASESTR(CLUSTER_JACOBI);
+    CASESTR(CLUSTER_TRIDIAGONAL);
+    default:
+      return "UNKNOWN";
+  }
+}
+
+bool StringToPreconditionerType(string value, PreconditionerType* type) {
+  UpperCase(&value);
+  STRENUM(IDENTITY);
+  STRENUM(JACOBI);
+  STRENUM(SCHUR_JACOBI);
+  STRENUM(CLUSTER_JACOBI);
+  STRENUM(CLUSTER_TRIDIAGONAL);
+  return false;
+}
+
+const char* SparseLinearAlgebraLibraryTypeToString(
+    SparseLinearAlgebraLibraryType type) {
+  switch (type) {
+    CASESTR(SUITE_SPARSE);
+    CASESTR(CX_SPARSE);
+    CASESTR(EIGEN_SPARSE);
+    CASESTR(ACCELERATE_SPARSE);
+    CASESTR(NO_SPARSE);
+    default:
+      return "UNKNOWN";
+  }
+}
+
+bool StringToSparseLinearAlgebraLibraryType(
+    string value,
+    SparseLinearAlgebraLibraryType* type) {
+  UpperCase(&value);
+  STRENUM(SUITE_SPARSE);
+  STRENUM(CX_SPARSE);
+  STRENUM(EIGEN_SPARSE);
+  STRENUM(ACCELERATE_SPARSE);
+  STRENUM(NO_SPARSE);
+  return false;
+}
+
+const char* DenseLinearAlgebraLibraryTypeToString(
+    DenseLinearAlgebraLibraryType type) {
+  switch (type) {
+    CASESTR(EIGEN);
+    CASESTR(LAPACK);
+    default:
+      return "UNKNOWN";
+  }
+}
+
+bool StringToDenseLinearAlgebraLibraryType(
+    string value,
+    DenseLinearAlgebraLibraryType* type) {
+  UpperCase(&value);
+  STRENUM(EIGEN);
+  STRENUM(LAPACK);
+  return false;
+}
+
+const char* TrustRegionStrategyTypeToString(TrustRegionStrategyType type) {
+  switch (type) {
+    CASESTR(LEVENBERG_MARQUARDT);
+    CASESTR(DOGLEG);
+    default:
+      return "UNKNOWN";
+  }
+}
+
+bool StringToTrustRegionStrategyType(string value,
+                                     TrustRegionStrategyType* type) {
+  UpperCase(&value);
+  STRENUM(LEVENBERG_MARQUARDT);
+  STRENUM(DOGLEG);
+  return false;
+}
+
+const char* DoglegTypeToString(DoglegType type) {
+  switch (type) {
+    CASESTR(TRADITIONAL_DOGLEG);
+    CASESTR(SUBSPACE_DOGLEG);
+    default:
+      return "UNKNOWN";
+  }
+}
+
+bool StringToDoglegType(string value, DoglegType* type) {
+  UpperCase(&value);
+  STRENUM(TRADITIONAL_DOGLEG);
+  STRENUM(SUBSPACE_DOGLEG);
+  return false;
+}
+
+const char* MinimizerTypeToString(MinimizerType type) {
+  switch (type) {
+    CASESTR(TRUST_REGION);
+    CASESTR(LINE_SEARCH);
+    default:
+      return "UNKNOWN";
+  }
+}
+
+bool StringToMinimizerType(string value, MinimizerType* type) {
+  UpperCase(&value);
+  STRENUM(TRUST_REGION);
+  STRENUM(LINE_SEARCH);
+  return false;
+}
+
+const char* LineSearchDirectionTypeToString(LineSearchDirectionType type) {
+  switch (type) {
+    CASESTR(STEEPEST_DESCENT);
+    CASESTR(NONLINEAR_CONJUGATE_GRADIENT);
+    CASESTR(LBFGS);
+    CASESTR(BFGS);
+    default:
+      return "UNKNOWN";
+  }
+}
+
+bool StringToLineSearchDirectionType(string value,
+                                     LineSearchDirectionType* type) {
+  UpperCase(&value);
+  STRENUM(STEEPEST_DESCENT);
+  STRENUM(NONLINEAR_CONJUGATE_GRADIENT);
+  STRENUM(LBFGS);
+  STRENUM(BFGS);
+  return false;
+}
+
+const char* LineSearchTypeToString(LineSearchType type) {
+  switch (type) {
+    CASESTR(ARMIJO);
+    CASESTR(WOLFE);
+    default:
+      return "UNKNOWN";
+  }
+}
+
+bool StringToLineSearchType(string value, LineSearchType* type) {
+  UpperCase(&value);
+  STRENUM(ARMIJO);
+  STRENUM(WOLFE);
+  return false;
+}
+
+const char* LineSearchInterpolationTypeToString(
+    LineSearchInterpolationType type) {
+  switch (type) {
+    CASESTR(BISECTION);
+    CASESTR(QUADRATIC);
+    CASESTR(CUBIC);
+    default:
+      return "UNKNOWN";
+  }
+}
+
+bool StringToLineSearchInterpolationType(
+    string value,
+    LineSearchInterpolationType* type) {
+  UpperCase(&value);
+  STRENUM(BISECTION);
+  STRENUM(QUADRATIC);
+  STRENUM(CUBIC);
+  return false;
+}
+
+const char* NonlinearConjugateGradientTypeToString(
+    NonlinearConjugateGradientType type) {
+  switch (type) {
+    CASESTR(FLETCHER_REEVES);
+    CASESTR(POLAK_RIBIERE);
+    CASESTR(HESTENES_STIEFEL);
+    default:
+      return "UNKNOWN";
+  }
+}
+
+bool StringToNonlinearConjugateGradientType(
+    string value,
+    NonlinearConjugateGradientType* type) {
+  UpperCase(&value);
+  STRENUM(FLETCHER_REEVES);
+  STRENUM(POLAK_RIBIERE);
+  STRENUM(HESTENES_STIEFEL);
+  return false;
+}
+
+const char* CovarianceAlgorithmTypeToString(
+    CovarianceAlgorithmType type) {
+  switch (type) {
+    CASESTR(DENSE_SVD);
+    CASESTR(SPARSE_QR);
+    default:
+      return "UNKNOWN";
+  }
+}
+
+bool StringToCovarianceAlgorithmType(
+    string value,
+    CovarianceAlgorithmType* type) {
+  UpperCase(&value);
+  STRENUM(DENSE_SVD);
+  STRENUM(SPARSE_QR);
+  return false;
+}
+
+const char* NumericDiffMethodTypeToString(
+    NumericDiffMethodType type) {
+  switch (type) {
+    CASESTR(CENTRAL);
+    CASESTR(FORWARD);
+    CASESTR(RIDDERS);
+    default:
+      return "UNKNOWN";
+  }
+}
+
+bool StringToNumericDiffMethodType(
+    string value,
+    NumericDiffMethodType* type) {
+  UpperCase(&value);
+  STRENUM(CENTRAL);
+  STRENUM(FORWARD);
+  STRENUM(RIDDERS);
+  return false;
+}
+
+const char* VisibilityClusteringTypeToString(
+    VisibilityClusteringType type) {
+  switch (type) {
+    CASESTR(CANONICAL_VIEWS);
+    CASESTR(SINGLE_LINKAGE);
+    default:
+      return "UNKNOWN";
+  }
+}
+
+bool StringToVisibilityClusteringType(
+    string value,
+    VisibilityClusteringType* type) {
+  UpperCase(&value);
+  STRENUM(CANONICAL_VIEWS);
+  STRENUM(SINGLE_LINKAGE);
+  return false;
+}
+
+const char* TerminationTypeToString(TerminationType type) {
+  switch (type) {
+    CASESTR(CONVERGENCE);
+    CASESTR(NO_CONVERGENCE);
+    CASESTR(FAILURE);
+    CASESTR(USER_SUCCESS);
+    CASESTR(USER_FAILURE);
+    default:
+      return "UNKNOWN";
+  }
+}
+
+#undef CASESTR
+#undef STRENUM
+
+bool IsSchurType(LinearSolverType type) {
+  return ((type == SPARSE_SCHUR) ||
+          (type == DENSE_SCHUR)  ||
+          (type == ITERATIVE_SCHUR));
+}
+
+bool IsSparseLinearAlgebraLibraryTypeAvailable(
+    SparseLinearAlgebraLibraryType type) {
+  if (type == SUITE_SPARSE) {
+#ifdef CERES_NO_SUITESPARSE
+    return false;
+#else
+    return true;
+#endif
+  }
+
+  if (type == CX_SPARSE) {
+#ifdef CERES_NO_CXSPARSE
+    return false;
+#else
+    return true;
+#endif
+  }
+
+  if (type == ACCELERATE_SPARSE) {
+#ifdef CERES_NO_ACCELERATE_SPARSE
+    return false;
+#else
+    return true;
+#endif
+  }
+
+  if (type == EIGEN_SPARSE) {
+#ifdef CERES_USE_EIGEN_SPARSE
+    return true;
+#else
+    return false;
+#endif
+  }
+
+  LOG(WARNING) << "Unknown sparse linear algebra library " << type;
+  return false;
+}
+
+bool IsDenseLinearAlgebraLibraryTypeAvailable(
+    DenseLinearAlgebraLibraryType type) {
+  if (type == EIGEN) {
+    return true;
+  }
+  if (type == LAPACK) {
+#ifdef CERES_NO_LAPACK
+    return false;
+#else
+    return true;
+#endif
+  }
+
+  LOG(WARNING) << "Unknown dense linear algebra library " << type;
+  return false;
+}
+
+}  // namespace ceres
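
The CASESTR/STRENUM macros above give every enum a case-insensitive round trip between its name and its value, with the StringTo* functions returning false and leaving the output untouched for unknown strings. A small sketch, assuming it is built against the public ceres/types.h header where these helpers are declared:

#include <cstdio>
#include "ceres/types.h"

int main() {
  ceres::LinearSolverType type = ceres::DENSE_QR;

  // Case-insensitive string -> enum; UpperCase() is applied internally.
  if (ceres::StringToLinearSolverType("sparse_schur", &type)) {
    std::printf("type: %s, is Schur type: %s\n",
                ceres::LinearSolverTypeToString(type),
                ceres::IsSchurType(type) ? "yes" : "no");
  }

  // Unknown strings return false and leave *type unchanged.
  if (!ceres::StringToLinearSolverType("NOT_A_SOLVER", &type)) {
    std::printf("parse failed, type is still: %s\n",
                ceres::LinearSolverTypeToString(type));
  }
  return 0;
}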
diff --git a/internal/ceres/visibility.cc b/internal/ceres/visibility.cc
new file mode 100644
index 0000000..72a1c33
--- /dev/null
+++ b/internal/ceres/visibility.cc
@@ -0,0 +1,152 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: kushalav@google.com (Avanish Kushal)
+
+#include "ceres/visibility.h"
+
+#include <cmath>
+#include <ctime>
+#include <algorithm>
+#include <set>
+#include <vector>
+#include <unordered_map>
+#include <utility>
+#include "ceres/block_structure.h"
+#include "ceres/graph.h"
+#include "ceres/pair_hash.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+using std::make_pair;
+using std::max;
+using std::pair;
+using std::set;
+using std::vector;
+
+void ComputeVisibility(const CompressedRowBlockStructure& block_structure,
+                       const int num_eliminate_blocks,
+                       vector<set<int>>* visibility) {
+  CHECK(visibility != nullptr);
+
+  // Clear the visibility vector and resize it to hold a
+  // set for each camera.
+  visibility->resize(0);
+  visibility->resize(block_structure.cols.size() - num_eliminate_blocks);
+
+  for (int i = 0; i < block_structure.rows.size(); ++i) {
+    const vector<Cell>& cells = block_structure.rows[i].cells;
+    int block_id = cells[0].block_id;
+    // If the first block is not an e_block, then skip this row block.
+    if (block_id >= num_eliminate_blocks) {
+      continue;
+    }
+
+    for (int j = 1; j < cells.size(); ++j) {
+      int camera_block_id = cells[j].block_id - num_eliminate_blocks;
+      DCHECK_GE(camera_block_id, 0);
+      DCHECK_LT(camera_block_id, visibility->size());
+      (*visibility)[camera_block_id].insert(block_id);
+    }
+  }
+}
+
+WeightedGraph<int>* CreateSchurComplementGraph(
+    const vector<set<int>>& visibility) {
+  const time_t start_time = time(NULL);
+  // Compute the number of e_blocks/point blocks. Since the visibility
+  // set for each f_block/camera contains the set of e_blocks/points
+  // visible to it, we find the maximum across all visibility sets.
+  int num_points = 0;
+  for (int i = 0; i < visibility.size(); i++) {
+    if (visibility[i].size() > 0) {
+      num_points = max(num_points, (*visibility[i].rbegin()) + 1);
+    }
+  }
+
+  // Invert the visibility. The input is a camera->point mapping,
+  // which tells us which points are visible in which
+  // cameras. However, to compute the sparsity structure of the Schur
+  // complement efficiently, it's better to have the point->camera
+  // mapping.
+  vector<set<int>> inverse_visibility(num_points);
+  for (int i = 0; i < visibility.size(); i++) {
+    const set<int>& visibility_set = visibility[i];
+    for (const int v : visibility_set) {
+      inverse_visibility[v].insert(i);
+    }
+  }
+
+  // Map from camera pairs to number of points visible to both cameras
+  // in the pair.
+  std::unordered_map<pair<int, int>, int, pair_hash> camera_pairs;
+
+  // Count the number of points visible to each camera/f_block pair.
+  for (const auto& inverse_visibility_set : inverse_visibility) {
+    for (set<int>::const_iterator camera1 = inverse_visibility_set.begin();
+         camera1 != inverse_visibility_set.end();
+         ++camera1) {
+      set<int>::const_iterator camera2 = camera1;
+      for (++camera2; camera2 != inverse_visibility_set.end(); ++camera2) {
+        ++(camera_pairs[make_pair(*camera1, *camera2)]);
+      }
+    }
+  }
+
+  WeightedGraph<int>* graph = new WeightedGraph<int>;
+
+  // Add vertices and initialize the pairs for self edges so that self
+  // edges are guaranteed. This is needed for the Canonical views
+  // algorithm to work correctly.
+  static const double kSelfEdgeWeight = 1.0;
+  for (int i = 0; i < visibility.size(); ++i) {
+    graph->AddVertex(i);
+    graph->AddEdge(i, i, kSelfEdgeWeight);
+  }
+
+  // Add an edge for each camera pair.
+  for (const auto& camera_pair_count : camera_pairs) {
+    const int camera1 = camera_pair_count.first.first;
+    const int camera2 = camera_pair_count.first.second;
+    const int count = camera_pair_count.second;
+    DCHECK_NE(camera1, camera2);
+    // Static cast necessary for Windows.
+    const double weight = static_cast<double>(count) /
+        (sqrt(static_cast<double>(
+                  visibility[camera1].size() * visibility[camera2].size())));
+    graph->AddEdge(camera1, camera2, weight);
+  }
+
+  VLOG(2) << "Schur complement graph time: " << (time(NULL) - start_time);
+  return graph;
+}
+
+}  // namespace internal
+}  // namespace ceres
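
The edge weight in CreateSchurComplementGraph above is the number of points two cameras see in common, normalized by the geometric mean of their visibility set sizes (the normalized dot product described in visibility.h below). The standalone toy below works through that formula for two hypothetical cameras without touching any Ceres internals:

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <iterator>
#include <set>

// Standalone illustration of the normalized co-visibility weight used
// for the Schur complement graph edges above.
int main() {
  // Toy visibility: point ids seen by camera 0 and camera 1.
  const std::set<int> camera0 = {0, 1, 2, 3};
  const std::set<int> camera1 = {2, 3, 4};

  std::set<int> shared;
  std::set_intersection(camera0.begin(), camera0.end(),
                        camera1.begin(), camera1.end(),
                        std::inserter(shared, shared.begin()));

  const double weight =
      static_cast<double>(shared.size()) /
      std::sqrt(static_cast<double>(camera0.size() * camera1.size()));

  // 2 shared points / sqrt(4 * 3) ~= 0.577, so these two cameras get a
  // moderately strong edge in the Schur complement graph.
  std::printf("shared = %zu, weight = %f\n", shared.size(), weight);
  return 0;
}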
diff --git a/internal/ceres/visibility.h b/internal/ceres/visibility.h
new file mode 100644
index 0000000..115d45f
--- /dev/null
+++ b/internal/ceres/visibility.h
@@ -0,0 +1,78 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: kushalav@google.com (Avanish Kushal)
+//         sameeragarwal@google.com (Sameer Agarwal)
+//
+// Functions to manipulate visibility information from the block
+// structure of sparse matrices.
+
+#ifndef CERES_INTERNAL_VISIBILITY_H_
+#define CERES_INTERNAL_VISIBILITY_H_
+
+#include <set>
+#include <vector>
+#include "ceres/graph.h"
+
+namespace ceres {
+namespace internal {
+
+struct CompressedRowBlockStructure;
+
+// Given a compressed row block structure, computes the set of
+// e_blocks "visible" to each f_block. If an e_block co-occurs with an
+// f_block in a residual block, it is visible to the f_block. The
+// first num_eliminate_blocks column blocks are e_blocks and the rest
+// are f_blocks.
+//
+// In a structure from motion problem, e_blocks correspond to 3D
+// points and f_blocks correspond to cameras.
+void ComputeVisibility(const CompressedRowBlockStructure& block_structure,
+                       int num_eliminate_blocks,
+                       std::vector<std::set<int>>* visibility);
+
+// Given f_block visibility as computed by the ComputeVisibility
+// function above, construct and return a graph whose vertices are
+// f_blocks and an edge connects two vertices if they have at least one
+// e_block in common. The weight of this edge is the normalized dot
+// product between the visibility vectors of the two
+// vertices/f_blocks.
+//
+// This graph reflects the sparsity structure of the reduced camera
+// matrix/Schur complement matrix obtained by eliminating the e_blocks
+// from the normal equations.
+//
+// Caller acquires ownership of the returned WeightedGraph pointer
+// (heap-allocated).
+WeightedGraph<int>* CreateSchurComplementGraph(
+    const std::vector<std::set<int>>& visibility);
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_VISIBILITY_H_
diff --git a/internal/ceres/visibility_based_preconditioner.cc b/internal/ceres/visibility_based_preconditioner.cc
new file mode 100644
index 0000000..ed4afb6
--- /dev/null
+++ b/internal/ceres/visibility_based_preconditioner.cc
@@ -0,0 +1,584 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/visibility_based_preconditioner.h"
+
+#include <algorithm>
+#include <functional>
+#include <iterator>
+#include <memory>
+#include <set>
+#include <utility>
+#include <vector>
+
+#include "Eigen/Dense"
+#include "ceres/block_random_access_sparse_matrix.h"
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/canonical_views_clustering.h"
+#include "ceres/graph.h"
+#include "ceres/graph_algorithms.h"
+#include "ceres/linear_solver.h"
+#include "ceres/schur_eliminator.h"
+#include "ceres/single_linkage_clustering.h"
+#include "ceres/visibility.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+using std::make_pair;
+using std::pair;
+using std::set;
+using std::swap;
+using std::vector;
+
+// TODO(sameeragarwal): Currently these are magic weights for the
+// preconditioner construction. Move these higher up into the Options
+// struct and provide some guidelines for choosing them.
+//
+// This will require some more work on the clustering algorithm and
+// possibly some more refactoring of the code.
+static const double kCanonicalViewsSizePenaltyWeight = 3.0;
+static const double kCanonicalViewsSimilarityPenaltyWeight = 0.0;
+static const double kSingleLinkageMinSimilarity = 0.9;
+
+VisibilityBasedPreconditioner::VisibilityBasedPreconditioner(
+    const CompressedRowBlockStructure& bs,
+    const Preconditioner::Options& options)
+    : options_(options), num_blocks_(0), num_clusters_(0) {
+  CHECK_GT(options_.elimination_groups.size(), 1);
+  CHECK_GT(options_.elimination_groups[0], 0);
+  CHECK(options_.type == CLUSTER_JACOBI || options_.type == CLUSTER_TRIDIAGONAL)
+      << "Unknown preconditioner type: " << options_.type;
+  num_blocks_ = bs.cols.size() - options_.elimination_groups[0];
+  CHECK_GT(num_blocks_, 0) << "Jacobian should have at least 1 f_block for "
+                           << "visibility based preconditioning.";
+  CHECK(options_.context != NULL);
+
+  // Vector of camera block sizes
+  block_size_.resize(num_blocks_);
+  for (int i = 0; i < num_blocks_; ++i) {
+    block_size_[i] = bs.cols[i + options_.elimination_groups[0]].size;
+  }
+
+  const time_t start_time = time(NULL);
+  switch (options_.type) {
+    case CLUSTER_JACOBI:
+      ComputeClusterJacobiSparsity(bs);
+      break;
+    case CLUSTER_TRIDIAGONAL:
+      ComputeClusterTridiagonalSparsity(bs);
+      break;
+    default:
+      LOG(FATAL) << "Unknown preconditioner type";
+  }
+  const time_t structure_time = time(NULL);
+  InitStorage(bs);
+  const time_t storage_time = time(NULL);
+  InitEliminator(bs);
+  const time_t eliminator_time = time(NULL);
+
+  LinearSolver::Options sparse_cholesky_options;
+  sparse_cholesky_options.sparse_linear_algebra_library_type =
+      options_.sparse_linear_algebra_library_type;
+
+  // The preconditioner's sparsity is not available in the
+  // preprocessor, so the columns of the Jacobian have not been
+  // reordered to minimize fill in when computing its sparse Cholesky
+  // factorization. So we must tell the SparseCholesky object to
+  // perform approximate minimum-degree reordering, which is done by
+  // setting use_postordering to true.
+  sparse_cholesky_options.use_postordering = true;
+  sparse_cholesky_ = SparseCholesky::Create(sparse_cholesky_options);
+
+  const time_t init_time = time(NULL);
+  VLOG(2) << "init time: " << init_time - start_time
+          << " structure time: " << structure_time - start_time
+          << " storage time:" << storage_time - structure_time
+          << " eliminator time: " << eliminator_time - storage_time;
+}
+
+VisibilityBasedPreconditioner::~VisibilityBasedPreconditioner() {}
+
+// Determine the sparsity structure of the CLUSTER_JACOBI
+// preconditioner. It clusters cameras using their scene
+// visibility. The clusters form the diagonal blocks of the
+// preconditioner matrix.
+void VisibilityBasedPreconditioner::ComputeClusterJacobiSparsity(
+    const CompressedRowBlockStructure& bs) {
+  vector<set<int>> visibility;
+  ComputeVisibility(bs, options_.elimination_groups[0], &visibility);
+  CHECK_EQ(num_blocks_, visibility.size());
+  ClusterCameras(visibility);
+  cluster_pairs_.clear();
+  for (int i = 0; i < num_clusters_; ++i) {
+    cluster_pairs_.insert(make_pair(i, i));
+  }
+}
+
+// Determine the sparsity structure of the CLUSTER_TRIDIAGONAL
+// preconditioner. It clusters cameras using the scene
+// visibility and then finds the strongly interacting pairs of
+// clusters by constructing another graph with the clusters as
+// vertices and approximating it with a degree-2 maximum spanning
+// forest. The set of edges in this forest are the cluster pairs.
+void VisibilityBasedPreconditioner::ComputeClusterTridiagonalSparsity(
+    const CompressedRowBlockStructure& bs) {
+  vector<set<int>> visibility;
+  ComputeVisibility(bs, options_.elimination_groups[0], &visibility);
+  CHECK_EQ(num_blocks_, visibility.size());
+  ClusterCameras(visibility);
+
+  // Construct a weighted graph on the set of clusters, where the
+  // edges are the number of 3D points/e_blocks visible in both the
+  // clusters at the ends of the edge. Return an approximate degree-2
+  // maximum spanning forest of this graph.
+  vector<set<int>> cluster_visibility;
+  ComputeClusterVisibility(visibility, &cluster_visibility);
+  std::unique_ptr<WeightedGraph<int>> cluster_graph(
+      CreateClusterGraph(cluster_visibility));
+  CHECK(cluster_graph != nullptr);
+  std::unique_ptr<WeightedGraph<int>> forest(
+      Degree2MaximumSpanningForest(*cluster_graph));
+  CHECK(forest != nullptr);
+  ForestToClusterPairs(*forest, &cluster_pairs_);
+}
+
+// Allocate storage for the preconditioner matrix.
+void VisibilityBasedPreconditioner::InitStorage(
+    const CompressedRowBlockStructure& bs) {
+  ComputeBlockPairsInPreconditioner(bs);
+  m_.reset(new BlockRandomAccessSparseMatrix(block_size_, block_pairs_));
+}
+
+// Cluster the cameras based on their visibility sets, using either
+// the canonical views or the single linkage algorithm. The
+// visibility set of a camera is the set of
+// e_blocks/3D points in the scene that are seen by it.
+//
+// The cluster_membership_ vector is updated to indicate cluster
+// memberships for each camera block.
+void VisibilityBasedPreconditioner::ClusterCameras(
+    const vector<set<int> >& visibility) {
+  std::unique_ptr<WeightedGraph<int>> schur_complement_graph(
+      CreateSchurComplementGraph(visibility));
+  CHECK(schur_complement_graph != nullptr);
+
+  std::unordered_map<int, int> membership;
+
+  if (options_.visibility_clustering_type == CANONICAL_VIEWS) {
+    vector<int> centers;
+    CanonicalViewsClusteringOptions clustering_options;
+    clustering_options.size_penalty_weight = kCanonicalViewsSizePenaltyWeight;
+    clustering_options.similarity_penalty_weight =
+        kCanonicalViewsSimilarityPenaltyWeight;
+    ComputeCanonicalViewsClustering(
+        clustering_options, *schur_complement_graph, &centers, &membership);
+    num_clusters_ = centers.size();
+  } else if (options_.visibility_clustering_type == SINGLE_LINKAGE) {
+    SingleLinkageClusteringOptions clustering_options;
+    clustering_options.min_similarity = kSingleLinkageMinSimilarity;
+    num_clusters_ = ComputeSingleLinkageClustering(
+        clustering_options, *schur_complement_graph, &membership);
+  } else {
+    LOG(FATAL) << "Unknown visibility clustering algorithm.";
+  }
+
+  CHECK_GT(num_clusters_, 0);
+  VLOG(2) << "num_clusters: " << num_clusters_;
+  FlattenMembershipMap(membership, &cluster_membership_);
+}
+
+// Compute the block sparsity structure of the Schur complement
+// matrix. For each pair of cameras contributing a non-zero cell to
+// the Schur complement, determine if that cell is present in the
+// preconditioner or not.
+//
+// A pair of cameras contribute a cell to the preconditioner if they
+// are part of the same cluster or if the two clusters that they
+// belong to have an edge connecting them in the degree-2 maximum
+// spanning forest.
+//
+// For example, consider a camera pair (i,j) where i belongs to
+// cluster1 and j belongs to cluster2 (assume that cluster1 < cluster2).
+//
+// The cell corresponding to (i,j) is present in the preconditioner
+// if cluster1 == cluster2 or if the pair (cluster1, cluster2) is
+// connected by an edge in the degree-2 maximum spanning forest.
+//
+// Since we have already expanded the forest into a set of camera
+// pairs/edges, including self edges, the check can be reduced to
+// checking membership of (cluster1, cluster2) in cluster_pairs_.
+void VisibilityBasedPreconditioner::ComputeBlockPairsInPreconditioner(
+    const CompressedRowBlockStructure& bs) {
+  block_pairs_.clear();
+  for (int i = 0; i < num_blocks_; ++i) {
+    block_pairs_.insert(make_pair(i, i));
+  }
+
+  int r = 0;
+  const int num_row_blocks = bs.rows.size();
+  const int num_eliminate_blocks = options_.elimination_groups[0];
+
+  // Iterate over each row of the matrix. The block structure of the
+  // matrix is assumed to be sorted in order of the e_blocks/point
+  // blocks. Thus all row blocks containing an e_block/point occur
+  // contiguously. Further, if present, an e_block is always the first
+  // parameter block in each row block.  These structural assumptions
+  // are common to all Schur complement based solvers in Ceres.
+  //
+  // For each e_block/point block we identify the set of cameras
+  // seeing it. The cross product of this set with itself is the set
+  // of non-zero cells contributed by this e_block.
+  //
+  // The time complexity of this is O(nm^2), where n is the number of
+  // 3D points and m is the maximum number of cameras seeing any
+  // point, which for most scenes is a fairly small number.
+  while (r < num_row_blocks) {
+    int e_block_id = bs.rows[r].cells.front().block_id;
+    if (e_block_id >= num_eliminate_blocks) {
+      // Skip the rows whose first block is an f_block.
+      break;
+    }
+
+    set<int> f_blocks;
+    for (; r < num_row_blocks; ++r) {
+      const CompressedRow& row = bs.rows[r];
+      if (row.cells.front().block_id != e_block_id) {
+        break;
+      }
+
+      // Iterate over the blocks in the row, ignoring the first block
+      // since it is the one to be eliminated and adding the rest to
+      // the list of f_blocks associated with this e_block.
+      for (int c = 1; c < row.cells.size(); ++c) {
+        const Cell& cell = row.cells[c];
+        const int f_block_id = cell.block_id - num_eliminate_blocks;
+        CHECK_GE(f_block_id, 0);
+        f_blocks.insert(f_block_id);
+      }
+    }
+
+    for (set<int>::const_iterator block1 = f_blocks.begin();
+         block1 != f_blocks.end();
+         ++block1) {
+      set<int>::const_iterator block2 = block1;
+      ++block2;
+      for (; block2 != f_blocks.end(); ++block2) {
+        if (IsBlockPairInPreconditioner(*block1, *block2)) {
+          block_pairs_.insert(make_pair(*block1, *block2));
+        }
+      }
+    }
+  }
+
+  // Process the remaining rows, which do not contain any e_blocks.
+  for (; r < num_row_blocks; ++r) {
+    const CompressedRow& row = bs.rows[r];
+    CHECK_GE(row.cells.front().block_id, num_eliminate_blocks);
+    for (int i = 0; i < row.cells.size(); ++i) {
+      const int block1 = row.cells[i].block_id - num_eliminate_blocks;
+      for (int j = 0; j < row.cells.size(); ++j) {
+        const int block2 = row.cells[j].block_id - num_eliminate_blocks;
+        if (block1 <= block2) {
+          if (IsBlockPairInPreconditioner(block1, block2)) {
+            block_pairs_.insert(make_pair(block1, block2));
+          }
+        }
+      }
+    }
+  }
+
+  VLOG(1) << "Block pair stats: " << block_pairs_.size();
+}
+
+// Initialize the SchurEliminator.
+void VisibilityBasedPreconditioner::InitEliminator(
+    const CompressedRowBlockStructure& bs) {
+  LinearSolver::Options eliminator_options;
+  eliminator_options.elimination_groups = options_.elimination_groups;
+  eliminator_options.num_threads = options_.num_threads;
+  eliminator_options.e_block_size = options_.e_block_size;
+  eliminator_options.f_block_size = options_.f_block_size;
+  eliminator_options.row_block_size = options_.row_block_size;
+  eliminator_options.context = options_.context;
+  eliminator_.reset(SchurEliminatorBase::Create(eliminator_options));
+  const bool kFullRankETE = true;
+  eliminator_->Init(
+      eliminator_options.elimination_groups[0], kFullRankETE, &bs);
+}
+
+// Update the values of the preconditioner matrix and factorize it.
+bool VisibilityBasedPreconditioner::UpdateImpl(const BlockSparseMatrix& A,
+                                               const double* D) {
+  const time_t start_time = time(NULL);
+  const int num_rows = m_->num_rows();
+  CHECK_GT(num_rows, 0);
+
+  // Compute a subset of the entries of the Schur complement.
+  eliminator_->Eliminate(&A, nullptr, D, m_.get(), nullptr);
+
+  // Try factorizing the matrix. For CLUSTER_JACOBI, this should
+  // always succeed modulo some numerical/conditioning problems. For
+  // CLUSTER_TRIDIAGONAL, in general the preconditioner matrix as
+  // constructed is not positive definite. However, we will go ahead
+  // and try factorizing it. If it works, great, otherwise we scale
+  // all the cells in the preconditioner corresponding to the edges in
+  // the degree-2 forest and that guarantees positive
+  // definiteness. The proof of this fact can be found in Lemma 1 in
+  // "Visibility Based Preconditioning for Bundle Adjustment".
+  //
+  // Attempting the factorization first means we only give up matrix
+  // mass to scaling when it is actually needed; in our experience
+  // scaling is usually not needed.
+  LinearSolverTerminationType status = Factorize();
+
+  if (status == LINEAR_SOLVER_FATAL_ERROR) {
+    return false;
+  }
+
+  // The scaling only affects the tri-diagonal case, since
+  // ScaleOffDiagonalCells only pays attention to the cells that
+  // belong to the edges of the degree-2 forest. In the CLUSTER_JACOBI
+  // case, the preconditioner is guaranteed to be positive
+  // semidefinite.
+  if (status == LINEAR_SOLVER_FAILURE && options_.type == CLUSTER_TRIDIAGONAL) {
+    VLOG(1) << "Unscaled factorization failed. Retrying with off-diagonal "
+            << "scaling";
+    ScaleOffDiagonalCells();
+    status = Factorize();
+  }
+
+  VLOG(2) << "Compute time: " << time(NULL) - start_time;
+  return (status == LINEAR_SOLVER_SUCCESS);
+}
+
+// Consider the preconditioner matrix as a meta-block matrix, whose
+// blocks correspond to the clusters. Then the cluster pairs
+// corresponding to edges in the degree-2 forest are the off-diagonal
+// entries of this matrix. Scaling these off-diagonal entries by 1/2
+// forces this matrix to be positive definite.
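+//
+// A toy scalar illustration (not taken from the paper): the matrix
+// [1 1; 1 1] is only positive semidefinite, but halving the
+// off-diagonal entries gives [1 0.5; 0.5 1], which is positive
+// definite (eigenvalues 0.5 and 1.5). Lemma 1 in "Visibility Based
+// Preconditioning for Bundle Adjustment" proves the corresponding
+// statement for the block tridiagonal case.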
+void VisibilityBasedPreconditioner::ScaleOffDiagonalCells() {
+  for (const auto& block_pair : block_pairs_) {
+    const int block1 = block_pair.first;
+    const int block2 = block_pair.second;
+    if (!IsBlockPairOffDiagonal(block1, block2)) {
+      continue;
+    }
+
+    int r, c, row_stride, col_stride;
+    CellInfo* cell_info =
+        m_->GetCell(block1, block2, &r, &c, &row_stride, &col_stride);
+    CHECK(cell_info != NULL)
+        << "Cell missing for block pair (" << block1 << "," << block2 << ")"
+        << " cluster pair (" << cluster_membership_[block1] << " "
+        << cluster_membership_[block2] << ")";
+
+    // Ah the magic of tri-diagonal matrices and diagonal
+    // dominance. See Lemma 1 in "Visibility Based Preconditioning
+    // For Bundle Adjustment".
+    MatrixRef m(cell_info->values, row_stride, col_stride);
+    m.block(r, c, block_size_[block1], block_size_[block2]) *= 0.5;
+  }
+}
+
+// Compute the sparse Cholesky factorization of the preconditioner
+// matrix.
+LinearSolverTerminationType VisibilityBasedPreconditioner::Factorize() {
+  // Extract the TripletSparseMatrix that is used for actually storing
+  // S and convert it into a CompressedRowSparseMatrix.
+  const TripletSparseMatrix* tsm =
+      down_cast<BlockRandomAccessSparseMatrix*>(m_.get())->mutable_matrix();
+
+  std::unique_ptr<CompressedRowSparseMatrix> lhs;
+  const CompressedRowSparseMatrix::StorageType storage_type =
+      sparse_cholesky_->StorageType();
+  if (storage_type == CompressedRowSparseMatrix::UPPER_TRIANGULAR) {
+    lhs.reset(CompressedRowSparseMatrix::FromTripletSparseMatrix(*tsm));
+    lhs->set_storage_type(CompressedRowSparseMatrix::UPPER_TRIANGULAR);
+  } else {
+    lhs.reset(
+        CompressedRowSparseMatrix::FromTripletSparseMatrixTransposed(*tsm));
+    lhs->set_storage_type(CompressedRowSparseMatrix::LOWER_TRIANGULAR);
+  }
+
+  std::string message;
+  return sparse_cholesky_->Factorize(lhs.get(), &message);
+}
+
+void VisibilityBasedPreconditioner::RightMultiply(const double* x,
+                                                  double* y) const {
+  CHECK(x != nullptr);
+  CHECK(y != nullptr);
+  CHECK(sparse_cholesky_ != nullptr);
+  std::string message;
+  sparse_cholesky_->Solve(x, y, &message);
+}
+
+int VisibilityBasedPreconditioner::num_rows() const { return m_->num_rows(); }
+
+// Classify camera/f_block pairs as in and out of the preconditioner,
+// based on whether the cluster pair that they belong to is in the
+// preconditioner or not.
+bool VisibilityBasedPreconditioner::IsBlockPairInPreconditioner(
+    const int block1, const int block2) const {
+  int cluster1 = cluster_membership_[block1];
+  int cluster2 = cluster_membership_[block2];
+  if (cluster1 > cluster2) {
+    swap(cluster1, cluster2);
+  }
+  return (cluster_pairs_.count(make_pair(cluster1, cluster2)) > 0);
+}
+
+bool VisibilityBasedPreconditioner::IsBlockPairOffDiagonal(
+    const int block1, const int block2) const {
+  return (cluster_membership_[block1] != cluster_membership_[block2]);
+}
+
+// Convert a graph into a list of edges that includes self edges for
+// each vertex.
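+//
+// For example (hypothetical forest): vertices {0, 1, 2} with the
+// single edge 0-1 produce the cluster pairs (0,0), (1,1), (2,2) and
+// (0,1); each edge is recorded once, with cluster1 < cluster2.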
+void VisibilityBasedPreconditioner::ForestToClusterPairs(
+    const WeightedGraph<int>& forest,
+    std::unordered_set<pair<int, int>, pair_hash >* cluster_pairs) const {
+  CHECK(cluster_pairs != nullptr);
+  cluster_pairs->clear();
+  const std::unordered_set<int>& vertices = forest.vertices();
+  CHECK_EQ(vertices.size(), num_clusters_);
+
+  // Add all the cluster pairs corresponding to the edges in the
+  // forest.
+  for (const int cluster1 : vertices) {
+    cluster_pairs->insert(make_pair(cluster1, cluster1));
+    const std::unordered_set<int>& neighbors = forest.Neighbors(cluster1);
+    for (const int cluster2 : neighbors) {
+      if (cluster1 < cluster2) {
+        cluster_pairs->insert(make_pair(cluster1, cluster2));
+      }
+    }
+  }
+}
+
+// The visibility set of a cluster is the union of the visibility sets
+// of all its cameras. In other words, the set of points visible to
+// any camera in the cluster.
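+//
+// For example (hypothetical data): if cluster 0 contains cameras 2 and
+// 5, with visibility[2] = {10, 11} and visibility[5] = {11, 12}, then
+// the visibility of cluster 0 is {10, 11, 12}.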
+void VisibilityBasedPreconditioner::ComputeClusterVisibility(
+    const vector<set<int>>& visibility,
+    vector<set<int>>* cluster_visibility) const {
+  CHECK(cluster_visibility != nullptr);
+  cluster_visibility->resize(0);
+  cluster_visibility->resize(num_clusters_);
+  for (int i = 0; i < num_blocks_; ++i) {
+    const int cluster_id = cluster_membership_[i];
+    (*cluster_visibility)[cluster_id].insert(visibility[i].begin(),
+                                             visibility[i].end());
+  }
+}
+
+// Construct a graph whose vertices are the clusters, and whose edge
+// weights are the number of 3D points visible to cameras in both of
+// the clusters that the edge connects.
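+//
+// For example (hypothetical data): if cluster_visibility[0] = {1, 2, 3}
+// and cluster_visibility[1] = {2, 3, 4}, the graph gets an edge (0, 1)
+// with weight 2, since the two clusters share the points {2, 3}.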
+WeightedGraph<int>* VisibilityBasedPreconditioner::CreateClusterGraph(
+    const vector<set<int>>& cluster_visibility) const {
+  WeightedGraph<int>* cluster_graph = new WeightedGraph<int>;
+
+  for (int i = 0; i < num_clusters_; ++i) {
+    cluster_graph->AddVertex(i);
+  }
+
+  for (int i = 0; i < num_clusters_; ++i) {
+    const set<int>& cluster_i = cluster_visibility[i];
+    for (int j = i + 1; j < num_clusters_; ++j) {
+      vector<int> intersection;
+      const set<int>& cluster_j = cluster_visibility[j];
+      set_intersection(cluster_i.begin(),
+                       cluster_i.end(),
+                       cluster_j.begin(),
+                       cluster_j.end(),
+                       back_inserter(intersection));
+
+      if (intersection.size() > 0) {
+        // Clusters interact strongly when they share a large number
+        // of 3D points. The degree-2 maximum spanning forest
+        // algorithm iterates over the edges in decreasing order of
+        // their weight, which is the number of points shared by the
+        // two clusters that the edge connects.
+        cluster_graph->AddEdge(i, j, intersection.size());
+      }
+    }
+  }
+  return cluster_graph;
+}
+
+// Canonical views clustering returns a std::unordered_map from vertices to
+// cluster ids. Convert this into a flat array for quick lookup. It is
+// possible that some of the vertices may not be associated with any
+// cluster. In that case, arbitrarily assign them to one of the clusters.
+//
+// The cluster ids can be non-contiguous integers. So as we flatten
+// the membership_map, we also map the cluster ids to a contiguous set
+// of integers so that the cluster ids are in [0, num_clusters_).
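+//
+// For example (hypothetical input): with num_clusters_ = 2, the map
+// {0: 7, 1: 3, 2: 7} could be flattened to the vector [0, 1, 0], with
+// cluster id 7 mapped to index 0 and cluster id 3 mapped to index 1
+// (the exact index assignment depends on the iteration order of the
+// unordered map).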
+void VisibilityBasedPreconditioner::FlattenMembershipMap(
+    const std::unordered_map<int, int>& membership_map,
+    vector<int>* membership_vector) const {
+  CHECK(membership_vector != nullptr);
+  membership_vector->resize(0);
+  membership_vector->resize(num_blocks_, -1);
+
+  std::unordered_map<int, int> cluster_id_to_index;
+  // Iterate over the cluster membership map and update the
+  // cluster_membership_ vector assigning arbitrary cluster ids to
+  // the few cameras that have not been clustered.
+  for (const auto& m : membership_map) {
+    const int camera_id = m.first;
+    int cluster_id = m.second;
+
+    // If the view was not clustered, assign it arbitrarily to one of
+    // the clusters (camera id modulo the number of clusters). This
+    // preserves the mathematical correctness of the preconditioner,
+    // though if too many views are left unclustered the quality of
+    // the preconditioner may degrade.
+    //
+    // TODO(sameeragarwal): Check if a large number of views have not
+    // been clustered and deal with it?
+    if (cluster_id == -1) {
+      cluster_id = camera_id % num_clusters_;
+    }
+
+    const int index = FindWithDefault(
+        cluster_id_to_index, cluster_id, cluster_id_to_index.size());
+
+    if (index == cluster_id_to_index.size()) {
+      cluster_id_to_index[cluster_id] = index;
+    }
+
+    CHECK_LT(index, num_clusters_);
+    membership_vector->at(camera_id) = index;
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/visibility_based_preconditioner.h b/internal/ceres/visibility_based_preconditioner.h
new file mode 100644
index 0000000..31ba171
--- /dev/null
+++ b/internal/ceres/visibility_based_preconditioner.h
@@ -0,0 +1,201 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Preconditioners for linear systems that arise in Structure from
+// Motion problems. VisibilityBasedPreconditioner implements:
+//
+//  CLUSTER_JACOBI
+//  CLUSTER_TRIDIAGONAL
+//
+// Detailed descriptions of these preconditioners beyond what is
+// documented here can be found in
+//
+// Visibility Based Preconditioning for Bundle Adjustment
+// A. Kushal & S. Agarwal, CVPR 2012.
+//
+// http://www.cs.washington.edu/homes/sagarwal/vbp.pdf
+//
+// The two preconditioners share enough code that it is most efficient
+// to implement them as part of the same code base.
+
+#ifndef CERES_INTERNAL_VISIBILITY_BASED_PRECONDITIONER_H_
+#define CERES_INTERNAL_VISIBILITY_BASED_PRECONDITIONER_H_
+
+#include <memory>
+#include <set>
+#include <unordered_map>
+#include <unordered_set>
+#include <utility>
+#include <vector>
+
+#include "ceres/graph.h"
+#include "ceres/linear_solver.h"
+#include "ceres/pair_hash.h"
+#include "ceres/preconditioner.h"
+#include "ceres/sparse_cholesky.h"
+
+namespace ceres {
+namespace internal {
+
+class BlockRandomAccessSparseMatrix;
+class BlockSparseMatrix;
+struct CompressedRowBlockStructure;
+class SchurEliminatorBase;
+
+// This class implements visibility based preconditioners for
+// Structure from Motion/Bundle Adjustment problems. The name
+// VisibilityBasedPreconditioner comes from the fact that the sparsity
+// structure of the preconditioner matrix is determined by analyzing
+// the visibility structure of the scene, i.e. which cameras see which
+// points.
+//
+// The key idea of visibility based preconditioning is to identify
+// cameras that we expect to have strong interactions, and then use the
+// entries in the Schur complement matrix corresponding to these
+// camera pairs as an approximation to the full Schur complement.
+//
+// CLUSTER_JACOBI identifies these camera pairs by clustering cameras,
+// and considering all non-zero camera pairs within each cluster. The
+// clustering in the current implementation is done using the
+// Canonical Views algorithm of Simon et al. (see
+// canonical_views_clustering.h). For the purposes of clustering, the
+// similarity or the degree of interaction between a pair of cameras
+// is measured by counting the number of points visible in both the
+// cameras. Thus the name VisibilityBasedPreconditioner. Further, if we
+// were to permute the parameter blocks such that all the cameras in
+// the same cluster occur contiguously, the preconditioner matrix would
+// be a block diagonal matrix with blocks corresponding to the
+// clusters. Thus in analogy with the Jacobi preconditioner we refer
+// to this as the CLUSTER_JACOBI preconditioner.
+//
+// CLUSTER_TRIDIAGONAL adds more mass to the CLUSTER_JACOBI
+// preconditioner by considering the interaction between clusters and
+// identifying strong interactions between cluster pairs. This is done
+// by constructing a weighted graph on the clusters, with the weight
+// on the edges connecting two clusters proportional to the number of
+// 3D points visible to cameras in both the clusters. A degree-2
+// maximum spanning forest is identified in this graph and the camera
+// pairs contained in the edges of this forest are added to the
+// preconditioner. The detailed reasoning for this construction is
+// explained in the paper mentioned above.
+//
+// Degree-2 spanning trees and forests have the property that they
+// correspond to tri-diagonal matrices. Thus there exists a permutation
+// of the camera blocks under which the CLUSTER_TRIDIAGONAL
+// preconditioner matrix is a block tridiagonal matrix, and thus the
+// name for the preconditioner.
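+//
+// A toy illustration (hypothetical clustering, for intuition only):
+// with three clusters and a degree-2 spanning forest containing the
+// edges (0,1) and (1,2), CLUSTER_TRIDIAGONAL keeps the cluster-level
+// blocks (0,0), (1,1), (2,2), (0,1) and (1,2) of the Schur complement,
+// which under a suitable camera permutation is exactly a block
+// tridiagonal matrix; CLUSTER_JACOBI keeps only the diagonal blocks.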
+//
+// Thread Safety: This class is NOT thread safe.
+//
+// Example usage:
+//
+//   LinearSolver::Options options;
+//   options.preconditioner_type = CLUSTER_JACOBI;
+//   options.elimination_groups.push_back(num_points);
+//   options.elimination_groups.push_back(num_cameras);
+//   VisibilityBasedPreconditioner preconditioner(
+//      *A.block_structure(), options);
+//   preconditioner.Update(A, NULL);
+//   preconditioner.RightMultiply(x, y);
+class VisibilityBasedPreconditioner : public BlockSparseMatrixPreconditioner {
+ public:
+  // Initialize the symbolic structure of the preconditioner. bs is
+  // the block structure of the linear system to be solved. It is used
+  // to determine the sparsity structure of the preconditioner matrix.
+  //
+  // It has the same structural requirement as other Schur complement
+  // based solvers. Please see schur_eliminator.h for more details.
+  VisibilityBasedPreconditioner(const CompressedRowBlockStructure& bs,
+                                const Preconditioner::Options& options);
+  VisibilityBasedPreconditioner(const VisibilityBasedPreconditioner&) = delete;
+  void operator=(const VisibilityBasedPreconditioner&) = delete;
+
+  virtual ~VisibilityBasedPreconditioner();
+
+  // Preconditioner interface
+  virtual void RightMultiply(const double* x, double* y) const;
+  virtual int num_rows() const;
+
+  friend class VisibilityBasedPreconditionerTest;
+
+ private:
+  virtual bool UpdateImpl(const BlockSparseMatrix& A, const double* D);
+  void ComputeClusterJacobiSparsity(const CompressedRowBlockStructure& bs);
+  void ComputeClusterTridiagonalSparsity(const CompressedRowBlockStructure& bs);
+  void InitStorage(const CompressedRowBlockStructure& bs);
+  void InitEliminator(const CompressedRowBlockStructure& bs);
+  LinearSolverTerminationType Factorize();
+  void ScaleOffDiagonalCells();
+
+  void ClusterCameras(const std::vector<std::set<int>>& visibility);
+  void FlattenMembershipMap(const std::unordered_map<int, int>& membership_map,
+                            std::vector<int>* membership_vector) const;
+  void ComputeClusterVisibility(
+      const std::vector<std::set<int>>& visibility,
+      std::vector<std::set<int>>* cluster_visibility) const;
+  WeightedGraph<int>* CreateClusterGraph(
+      const std::vector<std::set<int>>& visibility) const;
+  void ForestToClusterPairs(
+      const WeightedGraph<int>& forest,
+      std::unordered_set<std::pair<int, int>, pair_hash>* cluster_pairs) const;
+  void ComputeBlockPairsInPreconditioner(const CompressedRowBlockStructure& bs);
+  bool IsBlockPairInPreconditioner(int block1, int block2) const;
+  bool IsBlockPairOffDiagonal(int block1, int block2) const;
+
+  Preconditioner::Options options_;
+
+  // Number of parameter blocks in the schur complement.
+  int num_blocks_;
+  int num_clusters_;
+
+  // Sizes of the blocks in the schur complement.
+  std::vector<int> block_size_;
+
+  // Mapping from cameras to clusters.
+  std::vector<int> cluster_membership_;
+
+  // Non-zero camera pairs from the schur complement matrix that are
+  // present in the preconditioner, sorted by row (first element of
+  // each pair), then column (second).
+  std::set<std::pair<int, int>> block_pairs_;
+
+  // Set of cluster pairs (including self pairs (i,i)) in the
+  // preconditioner.
+  std::unordered_set<std::pair<int, int>, pair_hash> cluster_pairs_;
+  std::unique_ptr<SchurEliminatorBase> eliminator_;
+
+  // Preconditioner matrix.
+  std::unique_ptr<BlockRandomAccessSparseMatrix> m_;
+  std::unique_ptr<SparseCholesky> sparse_cholesky_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_VISIBILITY_BASED_PRECONDITIONER_H_
diff --git a/internal/ceres/visibility_based_preconditioner_test.cc b/internal/ceres/visibility_based_preconditioner_test.cc
new file mode 100644
index 0000000..a006d98
--- /dev/null
+++ b/internal/ceres/visibility_based_preconditioner_test.cc
@@ -0,0 +1,339 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/visibility_based_preconditioner.h"
+
+#include <memory>
+#include "Eigen/Dense"
+#include "ceres/block_random_access_dense_matrix.h"
+#include "ceres/block_random_access_sparse_matrix.h"
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/casts.h"
+#include "ceres/file.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/linear_least_squares_problems.h"
+#include "ceres/schur_eliminator.h"
+#include "ceres/stringprintf.h"
+#include "ceres/test_util.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+// TODO(sameeragarwal): Re-enable this test once serialization is
+// working again.
+
+// using testing::AssertionResult;
+// using testing::AssertionSuccess;
+// using testing::AssertionFailure;
+
+// static const double kTolerance = 1e-12;
+
+// class VisibilityBasedPreconditionerTest : public ::testing::Test {
+//  public:
+//   static const int kCameraSize = 9;
+
+//  protected:
+//   void SetUp() {
+//     string input_file = TestFileAbsolutePath("problem-6-1384-000.lsqp");
+
+//     std::unique_ptr<LinearLeastSquaresProblem> problem(
+//         CHECK_NOTNULL(CreateLinearLeastSquaresProblemFromFile(input_file)));
+//     A_.reset(down_cast<BlockSparseMatrix*>(problem->A.release()));
+//     b_.reset(problem->b.release());
+//     D_.reset(problem->D.release());
+
+//     const CompressedRowBlockStructure* bs =
+//         CHECK_NOTNULL(A_->block_structure());
+//     const int num_col_blocks = bs->cols.size();
+
+//     num_cols_ = A_->num_cols();
+//     num_rows_ = A_->num_rows();
+//     num_eliminate_blocks_ = problem->num_eliminate_blocks;
+//     num_camera_blocks_ = num_col_blocks - num_eliminate_blocks_;
+//     options_.elimination_groups.push_back(num_eliminate_blocks_);
+//     options_.elimination_groups.push_back(
+//         A_->block_structure()->cols.size() - num_eliminate_blocks_);
+
+//     vector<int> blocks(num_col_blocks - num_eliminate_blocks_, 0);
+//     for (int i = num_eliminate_blocks_; i < num_col_blocks; ++i) {
+//       blocks[i - num_eliminate_blocks_] = bs->cols[i].size;
+//     }
+
+//     // The input matrix is a real jacobian and fairly poorly
+//     // conditioned. Setting D to a large constant makes the normal
+//     // equations better conditioned and makes the tests below better
+//     // conditioned.
+//     VectorRef(D_.get(), num_cols_).setConstant(10.0);
+
+//     schur_complement_.reset(new BlockRandomAccessDenseMatrix(blocks));
+//     Vector rhs(schur_complement_->num_rows());
+
+//     std::unique_ptr<SchurEliminatorBase> eliminator;
+//     LinearSolver::Options eliminator_options;
+//     eliminator_options.elimination_groups = options_.elimination_groups;
+//     eliminator_options.num_threads = options_.num_threads;
+
+//     eliminator.reset(SchurEliminatorBase::Create(eliminator_options));
+//     eliminator->Init(num_eliminate_blocks_, bs);
+//     eliminator->Eliminate(A_.get(), b_.get(), D_.get(),
+//                           schur_complement_.get(), rhs.data());
+//   }
+
+//   AssertionResult IsSparsityStructureValid() {
+//     preconditioner_->InitStorage(*A_->block_structure());
+//     const std::unordered_set<pair<int, int>, pair_hash>& cluster_pairs =
+//         get_cluster_pairs();
+//     const vector<int>& cluster_membership = get_cluster_membership();
+
+//     for (int i = 0; i < num_camera_blocks_; ++i) {
+//       for (int j = i; j < num_camera_blocks_; ++j) {
+//         if (cluster_pairs.count(make_pair(cluster_membership[i],
+//                                           cluster_membership[j]))) {
+//           if (!IsBlockPairInPreconditioner(i, j)) {
+//             return AssertionFailure()
+//                 << "block pair (" << i << "," << j << ") missing";
+//           }
+//         } else {
+//           if (IsBlockPairInPreconditioner(i, j)) {
+//             return AssertionFailure()
+//                << "block pair (" << i << "," << j << ") should not be present";
+//           }
+//         }
+//       }
+//     }
+//     return AssertionSuccess();
+//   }
+
+//   AssertionResult PreconditionerValuesMatch() {
+//     preconditioner_->Update(*A_, D_.get());
+//     const std::unordered_set<pair<int, int>, pair_hash>& cluster_pairs =
+//         get_cluster_pairs();
+//     const BlockRandomAccessSparseMatrix* m = get_m();
+//     Matrix preconditioner_matrix;
+//     m->matrix()->ToDenseMatrix(&preconditioner_matrix);
+//     ConstMatrixRef full_schur_complement(schur_complement_->values(),
+//                                          m->num_rows(),
+//                                          m->num_rows());
+//     const int num_clusters = get_num_clusters();
+//     const int kDiagonalBlockSize =
+//         kCameraSize * num_camera_blocks_ / num_clusters;
+
+//     for (int i = 0; i < num_clusters; ++i) {
+//       for (int j = i; j < num_clusters; ++j) {
+//         double diff = 0.0;
+//         if (cluster_pairs.count(make_pair(i, j))) {
+//           diff =
+//               (preconditioner_matrix.block(kDiagonalBlockSize * i,
+//                                            kDiagonalBlockSize * j,
+//                                            kDiagonalBlockSize,
+//                                            kDiagonalBlockSize) -
+//                full_schur_complement.block(kDiagonalBlockSize * i,
+//                                            kDiagonalBlockSize * j,
+//                                            kDiagonalBlockSize,
+//                                            kDiagonalBlockSize)).norm();
+//         } else {
+//           diff = preconditioner_matrix.block(kDiagonalBlockSize * i,
+//                                              kDiagonalBlockSize * j,
+//                                              kDiagonalBlockSize,
+//                                              kDiagonalBlockSize).norm();
+//         }
+//         if (diff > kTolerance) {
+//           return AssertionFailure()
+//               << "Preconditioner block " << i << " " << j << " differs "
+//               << "from expected value by " << diff;
+//         }
+//       }
+//     }
+//     return AssertionSuccess();
+//   }
+
+//   // Accessors
+//   int get_num_blocks() { return preconditioner_->num_blocks_; }
+
+//   int get_num_clusters() { return preconditioner_->num_clusters_; }
+//   int* get_mutable_num_clusters() { return &preconditioner_->num_clusters_; }
+
+//   const vector<int>& get_block_size() {
+//     return preconditioner_->block_size_; }
+
+//   vector<int>* get_mutable_block_size() {
+//     return &preconditioner_->block_size_; }
+
+//   const vector<int>& get_cluster_membership() {
+//     return preconditioner_->cluster_membership_;
+//   }
+
+//   vector<int>* get_mutable_cluster_membership() {
+//     return &preconditioner_->cluster_membership_;
+//   }
+
+//   const set<pair<int, int>>& get_block_pairs() {
+//     return preconditioner_->block_pairs_;
+//   }
+
+//   set<pair<int, int>>* get_mutable_block_pairs() {
+//     return &preconditioner_->block_pairs_;
+//   }
+
+//   const std::unordered_set<pair<int, int>, pair_hash>& get_cluster_pairs() {
+//     return preconditioner_->cluster_pairs_;
+//   }
+
+//   std::unordered_set<pair<int, int>, pair_hash>* get_mutable_cluster_pairs()
+//   {
+//     return &preconditioner_->cluster_pairs_;
+//   }
+
+//   bool IsBlockPairInPreconditioner(const int block1, const int block2) {
+//     return preconditioner_->IsBlockPairInPreconditioner(block1, block2);
+//   }
+
+//   bool IsBlockPairOffDiagonal(const int block1, const int block2) {
+//     return preconditioner_->IsBlockPairOffDiagonal(block1, block2);
+//   }
+
+//   const BlockRandomAccessSparseMatrix* get_m() {
+//     return preconditioner_->m_.get();
+//   }
+
+//   int num_rows_;
+//   int num_cols_;
+//   int num_eliminate_blocks_;
+//   int num_camera_blocks_;
+
+//   std::unique_ptr<BlockSparseMatrix> A_;
+//   std::unique_ptr<double[]> b_;
+//   std::unique_ptr<double[]> D_;
+
+//   Preconditioner::Options options_;
+//   std::unique_ptr<VisibilityBasedPreconditioner> preconditioner_;
+//   std::unique_ptr<BlockRandomAccessDenseMatrix> schur_complement_;
+// };
+
+// TEST_F(VisibilityBasedPreconditionerTest, OneClusterClusterJacobi) {
+//   options_.type = CLUSTER_JACOBI;
+//   preconditioner_.reset(
+//       new VisibilityBasedPreconditioner(*A_->block_structure(), options_));
+
+//   // Override the clustering to be a single clustering containing all
+//   // the cameras.
+//   vector<int>& cluster_membership = *get_mutable_cluster_membership();
+//   for (int i = 0; i < num_camera_blocks_; ++i) {
+//     cluster_membership[i] = 0;
+//   }
+
+//   *get_mutable_num_clusters() = 1;
+
+//   std::unordered_set<pair<int, int>, pair_hash>& cluster_pairs =
+//       *get_mutable_cluster_pairs();
+//   cluster_pairs.clear();
+//   cluster_pairs.insert(make_pair(0, 0));
+
+//   EXPECT_TRUE(IsSparsityStructureValid());
+//   EXPECT_TRUE(PreconditionerValuesMatch());
+
+//   // Multiplication by the inverse of the preconditioner.
+//   const int num_rows = schur_complement_->num_rows();
+//   ConstMatrixRef full_schur_complement(schur_complement_->values(),
+//                                        num_rows,
+//                                        num_rows);
+//   Vector x(num_rows);
+//   Vector y(num_rows);
+//   Vector z(num_rows);
+
+//   for (int i = 0; i < num_rows; ++i) {
+//     x.setZero();
+//     y.setZero();
+//     z.setZero();
+//     x[i] = 1.0;
+//     preconditioner_->RightMultiply(x.data(), y.data());
+//     z = full_schur_complement
+//         .selfadjointView<Eigen::Upper>()
+//         .llt().solve(x);
+//     double max_relative_difference =
+//         ((y - z).array() / z.array()).matrix().lpNorm<Eigen::Infinity>();
+//     EXPECT_NEAR(max_relative_difference, 0.0, kTolerance);
+//   }
+// }
+
+// TEST_F(VisibilityBasedPreconditionerTest, ClusterJacobi) {
+//   options_.type = CLUSTER_JACOBI;
+//   preconditioner_.reset(
+//       new VisibilityBasedPreconditioner(*A_->block_structure(), options_));
+
+//   // Override the clustering to be equal number of cameras.
+//   vector<int>& cluster_membership = *get_mutable_cluster_membership();
+//   cluster_membership.resize(num_camera_blocks_);
+//   static const int kNumClusters = 3;
+
+//   for (int i = 0; i < num_camera_blocks_; ++i) {
+//     cluster_membership[i] = (i * kNumClusters) / num_camera_blocks_;
+//   }
+//   *get_mutable_num_clusters() = kNumClusters;
+
+//   std::unordered_set<pair<int, int>, pair_hash>& cluster_pairs =
+//       *get_mutable_cluster_pairs();
+//   cluster_pairs.clear();
+//   for (int i = 0; i < kNumClusters; ++i) {
+//     cluster_pairs.insert(make_pair(i, i));
+//   }
+
+//   EXPECT_TRUE(IsSparsityStructureValid());
+//   EXPECT_TRUE(PreconditionerValuesMatch());
+// }
+
+// TEST_F(VisibilityBasedPreconditionerTest, ClusterTridiagonal) {
+//   options_.type = CLUSTER_TRIDIAGONAL;
+//   preconditioner_.reset(
+//       new VisibilityBasedPreconditioner(*A_->block_structure(), options_));
+//   static const int kNumClusters = 3;
+
+//   // Override the clustering to be 3 clusters.
+//   vector<int>& cluster_membership = *get_mutable_cluster_membership();
+//   cluster_membership.resize(num_camera_blocks_);
+//   for (int i = 0; i < num_camera_blocks_; ++i) {
+//     cluster_membership[i] = (i * kNumClusters) / num_camera_blocks_;
+//   }
+//   *get_mutable_num_clusters() = kNumClusters;
+
+//   // Spanning forest has structure 0-1 2
+//   std::unordered_set<pair<int, int>, pair_hash>& cluster_pairs =
+//       *get_mutable_cluster_pairs();
+//   cluster_pairs.clear();
+//   for (int i = 0; i < kNumClusters; ++i) {
+//     cluster_pairs.insert(make_pair(i, i));
+//   }
+//   cluster_pairs.insert(make_pair(0, 1));
+
+//   EXPECT_TRUE(IsSparsityStructureValid());
+//   EXPECT_TRUE(PreconditionerValuesMatch());
+// }
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/visibility_test.cc b/internal/ceres/visibility_test.cc
new file mode 100644
index 0000000..5028e01
--- /dev/null
+++ b/internal/ceres/visibility_test.cc
@@ -0,0 +1,208 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: kushalav@google.com (Avanish Kushal)
+//         sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/visibility.h"
+
+#include <memory>
+#include <set>
+#include <vector>
+
+#include "ceres/block_structure.h"
+#include "ceres/graph.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+using std::set;
+using std::vector;
+
+class VisibilityTest : public ::testing::Test {
+};
+
+TEST(VisibilityTest, SimpleMatrix) {
+  //   A = [1 0 0 0 0 1
+  //        1 0 0 1 0 0
+  //        0 1 1 0 0 0
+  //        0 1 0 0 1 0]
+
+  int num_cols = 6;
+  int num_eliminate_blocks = 2;
+  CompressedRowBlockStructure bs;
+
+  // Row 1
+  {
+    bs.rows.push_back(CompressedRow());
+    CompressedRow& row = bs.rows.back();
+    row.block.size = 2;
+    row.block.position = 0;
+    row.cells.push_back(Cell(0, 0));
+    row.cells.push_back(Cell(5, 0));
+  }
+
+  // Row 2
+  {
+    bs.rows.push_back(CompressedRow());
+    CompressedRow& row = bs.rows.back();
+    row.block.size = 2;
+    row.block.position = 2;
+    row.cells.push_back(Cell(0, 1));
+    row.cells.push_back(Cell(3, 1));
+  }
+
+  // Row 3
+  {
+    bs.rows.push_back(CompressedRow());
+    CompressedRow& row = bs.rows.back();
+    row.block.size = 2;
+    row.block.position = 4;
+    row.cells.push_back(Cell(1, 2));
+    row.cells.push_back(Cell(2, 2));
+  }
+
+  // Row 4
+  {
+    bs.rows.push_back(CompressedRow());
+    CompressedRow& row = bs.rows.back();
+    row.block.size = 2;
+    row.block.position = 6;
+    row.cells.push_back(Cell(1, 3));
+    row.cells.push_back(Cell(4, 3));
+  }
+  bs.cols.resize(num_cols);
+
+  vector<set<int>> visibility;
+  ComputeVisibility(bs, num_eliminate_blocks, &visibility);
+  ASSERT_EQ(visibility.size(), num_cols - num_eliminate_blocks);
+  for (int i = 0; i < visibility.size(); ++i) {
+    ASSERT_EQ(visibility[i].size(), 1);
+  }
+
+  std::unique_ptr<WeightedGraph<int>> graph(
+      CreateSchurComplementGraph(visibility));
+  EXPECT_EQ(graph->vertices().size(), visibility.size());
+  for (int i = 0; i < visibility.size(); ++i) {
+    EXPECT_EQ(graph->VertexWeight(i), 1.0);
+  }
+
+  for (int i = 0; i < visibility.size(); ++i) {
+    for (int j = i; j < visibility.size(); ++j) {
+      double edge_weight = 0.0;
+      if ((i == 1 && j == 3) || (i == 0 && j == 2) || (i == j)) {
+        edge_weight = 1.0;
+      }
+
+      EXPECT_EQ(graph->EdgeWeight(i, j), edge_weight)
+          << "Edge: " << i << " " << j
+          << " weight: " << graph->EdgeWeight(i, j)
+          << " expected weight: " << edge_weight;
+    }
+  }
+}
+
+
+TEST(VisibilityTest, NoEBlocks) {
+  //   A = [1 0 0 0 0 0
+  //        1 0 0 0 0 0
+  //        0 1 0 0 0 0
+  //        0 1 0 0 0 0]
+
+  int num_cols = 6;
+  int num_eliminate_blocks = 2;
+  CompressedRowBlockStructure bs;
+
+  // Row 1
+  {
+    bs.rows.push_back(CompressedRow());
+    CompressedRow& row = bs.rows.back();
+    row.block.size = 2;
+    row.block.position = 0;
+    row.cells.push_back(Cell(0, 0));
+  }
+
+  // Row 2
+  {
+    bs.rows.push_back(CompressedRow());
+    CompressedRow& row = bs.rows.back();
+    row.block.size = 2;
+    row.block.position = 2;
+    row.cells.push_back(Cell(0, 1));
+  }
+
+  // Row 3
+  {
+    bs.rows.push_back(CompressedRow());
+    CompressedRow& row = bs.rows.back();
+    row.block.size = 2;
+    row.block.position = 4;
+    row.cells.push_back(Cell(1, 2));
+  }
+
+  // Row 4
+  {
+    bs.rows.push_back(CompressedRow());
+    CompressedRow& row = bs.rows.back();
+    row.block.size = 2;
+    row.block.position = 6;
+    row.cells.push_back(Cell(1, 3));
+  }
+  bs.cols.resize(num_cols);
+
+  vector<set<int>> visibility;
+  ComputeVisibility(bs, num_eliminate_blocks, &visibility);
+  ASSERT_EQ(visibility.size(), num_cols - num_eliminate_blocks);
+  for (int i = 0; i < visibility.size(); ++i) {
+    ASSERT_EQ(visibility[i].size(), 0);
+  }
+
+  std::unique_ptr<WeightedGraph<int>> graph(
+      CreateSchurComplementGraph(visibility));
+  EXPECT_EQ(graph->vertices().size(), visibility.size());
+  for (int i = 0; i < visibility.size(); ++i) {
+    EXPECT_EQ(graph->VertexWeight(i), 1.0);
+  }
+
+  for (int i = 0; i < visibility.size(); ++i) {
+    for (int j = i; j < visibility.size(); ++j) {
+      double edge_weight = 0.0;
+      if (i == j) {
+        edge_weight = 1.0;
+      }
+      EXPECT_EQ(graph->EdgeWeight(i, j), edge_weight)
+          << "Edge: " << i << " " << j
+          << " weight: " << graph->EdgeWeight(i, j)
+          << " expected weight: " << edge_weight;
+    }
+  }
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/wall_time.cc b/internal/ceres/wall_time.cc
new file mode 100644
index 0000000..09e3c4a
--- /dev/null
+++ b/internal/ceres/wall_time.cc
@@ -0,0 +1,101 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: strandmark@google.com (Petter Strandmark)
+
+#include "ceres/wall_time.h"
+
+#ifdef CERES_USE_OPENMP
+#include <omp.h>
+#else
+#include <ctime>
+#endif
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <sys/time.h>
+#endif
+
+namespace ceres {
+namespace internal {
+
+double WallTimeInSeconds() {
+#ifdef CERES_USE_OPENMP
+  return omp_get_wtime();
+#else
+#ifdef _WIN32
+  LARGE_INTEGER count;
+  LARGE_INTEGER frequency;
+  QueryPerformanceCounter(&count);
+  QueryPerformanceFrequency(&frequency);
+  return static_cast<double>(count.QuadPart) /
+         static_cast<double>(frequency.QuadPart);
+#else
+  timeval time_val;
+  gettimeofday(&time_val, NULL);
+  return (time_val.tv_sec + time_val.tv_usec * 1e-6);
+#endif
+#endif
+}
+
+EventLogger::EventLogger(const std::string& logger_name)
+    : start_time_(WallTimeInSeconds()),
+      last_event_time_(start_time_),
+      events_("") {
+  StringAppendF(&events_,
+                "\n%s\n                                   Delta   Cumulative\n",
+                logger_name.c_str());
+}
+
+EventLogger::~EventLogger() {
+  if (VLOG_IS_ON(3)) {
+    AddEvent("Total");
+    VLOG(2) << "\n" << events_ << "\n";
+  }
+}
+
+void EventLogger::AddEvent(const std::string& event_name) {
+  if (!VLOG_IS_ON(3)) {
+    return;
+  }
+
+  const double current_time = WallTimeInSeconds();
+  const double relative_time_delta = current_time - last_event_time_;
+  const double absolute_time_delta = current_time - start_time_;
+  last_event_time_ = current_time;
+
+  StringAppendF(&events_,
+                "  %30s : %10.5f   %10.5f\n",
+                event_name.c_str(),
+                relative_time_delta,
+                absolute_time_delta);
+}
+
+}  // namespace internal
+}  // namespace ceres
diff --git a/internal/ceres/wall_time.h b/internal/ceres/wall_time.h
new file mode 100644
index 0000000..966aa67
--- /dev/null
+++ b/internal/ceres/wall_time.h
@@ -0,0 +1,88 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: strandmark@google.com (Petter Strandmark)
+
+#ifndef CERES_INTERNAL_WALL_TIME_H_
+#define CERES_INTERNAL_WALL_TIME_H_
+
+#include <map>
+#include <string>
+#include "ceres/internal/port.h"
+#include "ceres/stringprintf.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+// Returns time, in seconds, from some arbitrary starting point. If
+// OpenMP is available then the high precision omp_get_wtime()
+// function is used. Otherwise, QueryPerformanceCounter is used on
+// Windows and gettimeofday is used on other (POSIX) systems.
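+//
+// A minimal usage sketch (DoExpensiveWork is a hypothetical
+// placeholder, not part of Ceres):
+//
+//   const double start = WallTimeInSeconds();
+//   DoExpensiveWork();
+//   VLOG(2) << "DoExpensiveWork took "
+//           << WallTimeInSeconds() - start << " seconds.";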
+double WallTimeInSeconds();
+
+// Log a series of events, recording for each event the time elapsed
+// since the last event and since the creation of the object.
+//
+// The information is output to VLOG(3) upon destruction. A
+// name::Total event is added as the final event right before
+// destruction.
+//
+// Example usage:
+//
+//  void Foo() {
+//    EventLogger event_logger("Foo");
+//    Bar1();
+//    event_logger.AddEvent("Bar1");
+//    Bar2();
+//    event_logger.AddEvent("Bar2");
+//    Bar3();
+//  }
+//
+// Will produce output that looks like
+//
+//  Foo
+//      Bar1:  time1  time1
+//      Bar2:  time2  time1 + time2
+//     Total:  time3  time1 + time2 + time3
+class EventLogger {
+ public:
+  explicit EventLogger(const std::string& logger_name);
+  ~EventLogger();
+  void AddEvent(const std::string& event_name);
+
+ private:
+  const double start_time_;
+  double last_event_time_;
+  std::string events_;
+};
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif  // CERES_INTERNAL_WALL_TIME_H_